1use super::*;
2use crate::udiff::apply_diff_to_string;
3use client::{RefreshLlmTokenListener, UserStore, test::FakeServer};
4use clock::FakeSystemClock;
5use clock::ReplicaId;
6use cloud_api_types::{CreateLlmTokenResponse, LlmToken};
7use cloud_llm_client::{
8 EditPredictionRejectReason, EditPredictionRejection, RejectEditPredictionsBody,
9 predict_edits_v3::{PredictEditsV3Request, PredictEditsV3Response},
10};
11
12use futures::{
13 AsyncReadExt, FutureExt, StreamExt,
14 channel::{mpsc, oneshot},
15};
16use gpui::App;
17use gpui::{
18 Entity, TestAppContext,
19 http_client::{FakeHttpClient, Response},
20};
21use indoc::indoc;
22use language::{
23 Anchor, Buffer, Capability, CursorShape, Diagnostic, DiagnosticEntry, DiagnosticSet,
24 DiagnosticSeverity, Operation, Point, Selection, SelectionGoal,
25};
26
27use lsp::LanguageServerId;
28use parking_lot::Mutex;
29use pretty_assertions::{assert_eq, assert_matches};
30use project::{FakeFs, Project};
31use serde_json::json;
32use settings::SettingsStore;
33use std::{ops::Range, path::Path, sync::Arc, time::Duration};
34use util::{
35 path,
36 test::{TextRangeMarker, marked_text_ranges_by},
37};
38use uuid::Uuid;
39use workspace::{AppState, CollaboratorId, MultiWorkspace};
40use zeta_prompt::ZetaPromptInput;
41
42use crate::{
43 BufferEditPrediction, EDIT_PREDICTION_SETTLED_QUIESCENCE, EditPredictionId,
44 EditPredictionJumpsFeatureFlag, EditPredictionStore, REJECT_REQUEST_DEBOUNCE,
45};
46
/// End-to-end check of the store's "current prediction" state: a prediction
/// targeting the active buffer surfaces as `Local`, while a prediction
/// triggered by a diagnostic in another file surfaces as a `Jump` from the
/// active buffer but as `Local` when queried from the target buffer itself.
#[gpui::test]
async fn test_current_state(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "1.txt": "Hello!\nHow\nBye\n",
            "2.txt": "Hola!\nComo\nAdios\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer1 = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("/root/1.txt"), cx).unwrap();
            project.set_active_path(Some(path.clone()), cx);
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();
    let snapshot1 = buffer1.read_with(cx, |buffer, _cx| buffer.snapshot());
    // Cursor in the middle of line 1 ("How"), the line the fake model edits.
    let position = snapshot1.anchor_before(language::Point::new(1, 3));

    ep_store.update(cx, |ep_store, cx| {
        ep_store.register_project(&project, cx);
        ep_store.register_buffer(&buffer1, &project, cx);
    });

    // Prediction for current file

    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx)
    });
    let (request, respond_tx) = requests.predict.next().await.unwrap();

    respond_tx
        .send(model_response(
            &request,
            indoc! {r"
                --- a/root/1.txt
                +++ b/root/1.txt
                @@ ... @@
                 Hello!
                -How
                +How are you?
                 Bye
            "},
        ))
        .unwrap();

    cx.run_until_parked();

    // The prediction edits the buffer we're in, so it's reported as `Local`.
    ep_store.update(cx, |ep_store, cx| {
        let prediction = ep_store
            .prediction_at(&buffer1, None, &project, cx)
            .unwrap();
        assert_matches!(prediction, BufferEditPrediction::Local { .. });
    });

    // Discard it so the next prediction can take its place.
    ep_store.update(cx, |ep_store, cx| {
        ep_store.reject_current_prediction(EditPredictionRejectReason::Discarded, &project, cx);
    });

    // Prediction for diagnostic in another file

    let diagnostic = lsp::Diagnostic {
        range: lsp::Range::new(lsp::Position::new(1, 1), lsp::Position::new(1, 5)),
        severity: Some(lsp::DiagnosticSeverity::ERROR),
        message: "Sentence is incomplete".to_string(),
        ..Default::default()
    };

    // Publishing a diagnostic for 2.txt triggers a new prediction request
    // without any edit in the active buffer.
    project.update(cx, |project, cx| {
        project.lsp_store().update(cx, |lsp_store, cx| {
            lsp_store
                .update_diagnostics(
                    LanguageServerId(0),
                    lsp::PublishDiagnosticsParams {
                        uri: lsp::Uri::from_file_path(path!("/root/2.txt")).unwrap(),
                        diagnostics: vec![diagnostic],
                        version: None,
                    },
                    None,
                    language::DiagnosticSourceKind::Pushed,
                    &[],
                    cx,
                )
                .unwrap();
        });
    });

    let (request, respond_tx) = requests.predict.next().await.unwrap();
    respond_tx
        .send(model_response(
            &request,
            indoc! {r#"
                --- a/root/2.txt
                +++ b/root/2.txt
                @@ ... @@
                 Hola!
                -Como
                +Como estas?
                 Adios
            "#},
        ))
        .unwrap();
    cx.run_until_parked();

    // Seen from buffer1, a prediction for 2.txt is presented as a jump target.
    ep_store.update(cx, |ep_store, cx| {
        let prediction = ep_store
            .prediction_at(&buffer1, None, &project, cx)
            .unwrap();
        assert_matches!(
            prediction,
            BufferEditPrediction::Jump { prediction } if prediction.snapshot.file().unwrap().full_path(cx) == Path::new(path!("root/2.txt"))
        );
    });

    let buffer2 = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/2.txt"), cx).unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();

    // The same prediction is `Local` when queried from the target buffer.
    ep_store.update(cx, |ep_store, cx| {
        let prediction = ep_store
            .prediction_at(&buffer2, None, &project, cx)
            .unwrap();
        assert_matches!(prediction, BufferEditPrediction::Local { .. });
    });
}
182
/// Diagnostics-driven prediction refreshes must not fire while the workspace
/// is following a collaborator (the agent); they resume after unfollowing.
#[gpui::test]
async fn test_diagnostics_refresh_suppressed_while_following(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);

    // NOTE(review): this appears to enable the jumps feature flag (first arg
    // is the staff bit), which the final `Jump` assertion relies on — confirm
    // against `update_flags`' signature.
    cx.update(|cx| {
        cx.update_flags(
            false,
            vec![EditPredictionJumpsFeatureFlag::NAME.to_string()],
        );
    });

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "1.txt": "Hello!\nHow\nBye\n",
            "2.txt": "Hola!\nComo\nAdios\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    // A real workspace is needed so the store can observe follow state.
    let app_state = cx.update(|cx| {
        let app_state = AppState::test(cx);
        AppState::set_global(app_state.clone(), cx);
        app_state
    });

    let multi_workspace =
        cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
    let workspace = multi_workspace
        .read_with(cx, |multi_workspace, _| multi_workspace.workspace().clone())
        .unwrap();
    // Re-point the global at the workspace's own app state.
    cx.update(|cx| {
        AppState::set_global(workspace.read(cx).app_state().clone(), cx);
    });
    // `app_state` was only needed to bootstrap the window above.
    let _ = app_state;

    let buffer1 = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/1.txt"), cx).unwrap();
            project.set_active_path(Some(path.clone()), cx);
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();
    let snapshot1 = buffer1.read_with(cx, |buffer, _cx| buffer.snapshot());
    let position = snapshot1.anchor_before(language::Point::new(1, 3));

    ep_store.update(cx, |ep_store, cx| {
        ep_store.register_project(&project, cx);
        ep_store.register_buffer(&buffer1, &project, cx);
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx);
    });

    // Satisfy the initial buffer-driven request so the store is idle.
    let (request, respond_tx) = requests.predict.next().await.unwrap();
    respond_tx
        .send(model_response(
            &request,
            indoc! {r"
                --- a/root/1.txt
                +++ b/root/1.txt
                @@ ... @@
                 Hello!
                -How
                +How are you?
                 Bye
            "},
        ))
        .unwrap();
    cx.run_until_parked();

    ep_store.update(cx, |ep_store, cx| {
        ep_store.reject_current_prediction(EditPredictionRejectReason::Discarded, &project, cx);
    });

    // Start following the agent; diagnostics refreshes should now be held.
    let _ = multi_workspace.update(cx, |multi_workspace, window, cx| {
        multi_workspace.workspace().update(cx, |workspace, cx| {
            workspace.start_following(CollaboratorId::Agent, window, cx);
        });
    });
    cx.run_until_parked();

    let diagnostic = lsp::Diagnostic {
        range: lsp::Range::new(lsp::Position::new(1, 1), lsp::Position::new(1, 5)),
        severity: Some(lsp::DiagnosticSeverity::ERROR),
        message: "Sentence is incomplete".to_string(),
        ..Default::default()
    };

    project.update(cx, |project, cx| {
        project.lsp_store().update(cx, |lsp_store, cx| {
            lsp_store
                .update_diagnostics(
                    LanguageServerId(0),
                    lsp::PublishDiagnosticsParams {
                        uri: lsp::Uri::from_file_path(path!("/root/2.txt")).unwrap(),
                        diagnostics: vec![diagnostic.clone()],
                        version: None,
                    },
                    None,
                    language::DiagnosticSourceKind::Pushed,
                    &[],
                    cx,
                )
                .unwrap();
        });
    });

    // While following: the diagnostic must NOT trigger a prediction request.
    cx.run_until_parked();
    assert_no_predict_request_ready(&mut requests.predict);

    // Stop following; diagnostics refreshes resume.
    let _ = multi_workspace.update(cx, |multi_workspace, window, cx| {
        multi_workspace.workspace().update(cx, |workspace, cx| {
            workspace.unfollow(CollaboratorId::Agent, window, cx);
        });
    });
    cx.run_until_parked();

    // Re-publish the same diagnostic now that we're no longer following.
    project.update(cx, |project, cx| {
        project.lsp_store().update(cx, |lsp_store, cx| {
            lsp_store
                .update_diagnostics(
                    LanguageServerId(0),
                    lsp::PublishDiagnosticsParams {
                        uri: lsp::Uri::from_file_path(path!("/root/2.txt")).unwrap(),
                        diagnostics: vec![diagnostic],
                        version: None,
                    },
                    None,
                    language::DiagnosticSourceKind::Pushed,
                    &[],
                    cx,
                )
                .unwrap();
        });
    });

    // This time a request is produced; answer it with an edit in 2.txt.
    let (request, respond_tx) = requests.predict.next().await.unwrap();
    respond_tx
        .send(model_response(
            &request,
            indoc! {r#"
                --- a/root/2.txt
                +++ b/root/2.txt
                @@ ... @@
                 Hola!
                -Como
                +Como estas?
                 Adios
            "#},
        ))
        .unwrap();
    cx.run_until_parked();

    // From buffer1's perspective, the 2.txt prediction is a jump target.
    ep_store.update(cx, |ep_store, cx| {
        let prediction = ep_store
            .prediction_at(&buffer1, None, &project, cx)
            .unwrap();
        assert_matches!(
            prediction,
            BufferEditPrediction::Jump { prediction } if prediction.snapshot.file().unwrap().full_path(cx) == Path::new(path!("root/2.txt"))
        );
    });
}
348
349#[gpui::test]
350async fn test_simple_request(cx: &mut TestAppContext) {
351 let (ep_store, mut requests) = init_test_with_fake_client(cx);
352 let fs = FakeFs::new(cx.executor());
353 fs.insert_tree(
354 "/root",
355 json!({
356 "foo.md": "Hello!\nHow\nBye\n"
357 }),
358 )
359 .await;
360 let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;
361
362 let buffer = project
363 .update(cx, |project, cx| {
364 let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
365 project.open_buffer(path, cx)
366 })
367 .await
368 .unwrap();
369 let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
370 let position = snapshot.anchor_before(language::Point::new(1, 3));
371
372 let prediction_task = ep_store.update(cx, |ep_store, cx| {
373 ep_store.request_prediction(&project, &buffer, position, Default::default(), cx)
374 });
375
376 let (request, respond_tx) = requests.predict.next().await.unwrap();
377
378 // TODO Put back when we have a structured request again
379 // assert_eq!(
380 // request.excerpt_path.as_ref(),
381 // Path::new(path!("root/foo.md"))
382 // );
383 // assert_eq!(
384 // request.cursor_point,
385 // Point {
386 // line: Line(1),
387 // column: 3
388 // }
389 // );
390
391 respond_tx
392 .send(model_response(
393 &request,
394 indoc! { r"
395 --- a/root/foo.md
396 +++ b/root/foo.md
397 @@ ... @@
398 Hello!
399 -How
400 +How are you?
401 Bye
402 "},
403 ))
404 .unwrap();
405
406 let prediction = prediction_task.await.unwrap().unwrap().prediction.unwrap();
407
408 assert_eq!(prediction.edits.len(), 1);
409 assert_eq!(
410 prediction.edits[0].0.to_point(&snapshot).start,
411 language::Point::new(1, 3)
412 );
413 assert_eq!(prediction.edits[0].1.as_ref(), " are you?");
414}
415
416#[gpui::test]
417async fn test_request_events(cx: &mut TestAppContext) {
418 let (ep_store, mut requests) = init_test_with_fake_client(cx);
419 let fs = FakeFs::new(cx.executor());
420 fs.insert_tree(
421 "/root",
422 json!({
423 "foo.md": "Hello!\n\nBye\n"
424 }),
425 )
426 .await;
427 let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;
428
429 let buffer = project
430 .update(cx, |project, cx| {
431 let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
432 project.open_buffer(path, cx)
433 })
434 .await
435 .unwrap();
436
437 ep_store.update(cx, |ep_store, cx| {
438 ep_store.register_buffer(&buffer, &project, cx);
439 });
440
441 buffer.update(cx, |buffer, cx| {
442 buffer.edit(vec![(7..7, "How")], None, cx);
443 });
444
445 let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
446 let position = snapshot.anchor_before(language::Point::new(1, 3));
447
448 let prediction_task = ep_store.update(cx, |ep_store, cx| {
449 ep_store.request_prediction(&project, &buffer, position, Default::default(), cx)
450 });
451
452 let (request, respond_tx) = requests.predict.next().await.unwrap();
453
454 let prompt = prompt_from_request(&request);
455 assert!(
456 prompt.contains(indoc! {"
457 --- a/root/foo.md
458 +++ b/root/foo.md
459 @@ -1,3 +1,3 @@
460 Hello!
461 -
462 +How
463 Bye
464 "}),
465 "{prompt}"
466 );
467
468 respond_tx
469 .send(model_response(
470 &request,
471 indoc! {r#"
472 --- a/root/foo.md
473 +++ b/root/foo.md
474 @@ ... @@
475 Hello!
476 -How
477 +How are you?
478 Bye
479 "#},
480 ))
481 .unwrap();
482
483 let prediction = prediction_task.await.unwrap().unwrap().prediction.unwrap();
484
485 assert_eq!(prediction.edits.len(), 1);
486 assert_eq!(prediction.edits[0].1.as_ref(), " are you?");
487}
488
/// A pause longer than `LAST_CHANGE_GROUPING_TIME` between edit bursts must
/// split the edit history into two separate events, even when all edits land
/// on the same line.
#[gpui::test]
async fn test_edit_history_getter_pause_splits_last_event(cx: &mut TestAppContext) {
    let (ep_store, _requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.md": "Hello!\n\nBye\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();

    ep_store.update(cx, |ep_store, cx| {
        ep_store.register_buffer(&buffer, &project, cx);
    });

    // First burst: insert "How"
    buffer.update(cx, |buffer, cx| {
        buffer.edit(vec![(7..7, "How")], None, cx);
    });

    // Simulate a pause longer than the grouping threshold (e.g. 500ms).
    cx.executor().advance_clock(LAST_CHANGE_GROUPING_TIME * 2);
    cx.run_until_parked();

    // Second burst: append " are you?" immediately after "How" on the same line.
    //
    // Keeping both bursts on the same line ensures the existing line-span coalescing logic
    // groups them into a single `LastEvent`, allowing the pause-split getter to return two diffs.
    buffer.update(cx, |buffer, cx| {
        buffer.edit(vec![(10..10, " are you?")], None, cx);
    });

    // A second edit shortly after the first post-pause edit ensures the last edit timestamp is
    // advanced after the pause boundary is recorded, making pause-splitting deterministic.
    buffer.update(cx, |buffer, cx| {
        buffer.edit(vec![(19..19, "!")], None, cx);
    });

    // With time-based splitting, there are two distinct events.
    let events = ep_store.update(cx, |ep_store, cx| {
        ep_store.edit_history_for_project(&project, cx)
    });
    assert_eq!(events.len(), 2);

    // First event covers only the pre-pause insertion of "How".
    let first_total_edit_range = buffer.read_with(cx, |buffer, _| {
        events[0].total_edit_range.to_point(&buffer.snapshot())
    });
    assert_eq!(first_total_edit_range, Point::new(1, 0)..Point::new(1, 3));

    let zeta_prompt::Event::BufferChange { diff, .. } = events[0].event.as_ref();
    assert_eq!(
        diff.as_str(),
        indoc! {"
            @@ -1,3 +1,3 @@
             Hello!
            -
            +How
             Bye
        "}
    );

    // Second event coalesces both post-pause edits (" are you?" and "!").
    let second_total_edit_range = buffer.read_with(cx, |buffer, _| {
        events[1].total_edit_range.to_point(&buffer.snapshot())
    });
    assert_eq!(second_total_edit_range, Point::new(1, 3)..Point::new(1, 13));

    let zeta_prompt::Event::BufferChange { diff, .. } = events[1].event.as_ref();
    assert_eq!(
        diff.as_str(),
        indoc! {"
            @@ -1,3 +1,3 @@
             Hello!
            -How
            +How are you?!
             Bye
        "}
    );
}
577
/// Exercises line-distance coalescing of edit-history events: edits within a
/// few lines of an existing event's range (the comments below assume 8) merge
/// into that event, while edits farther away start a new event.
///
/// NOTE(review): despite its name, this test covers distance-based
/// coalescing; predicted-vs-manual separation is covered by
/// `test_predicted_flag_coalescing`.
#[gpui::test]
async fn test_predicted_edits_are_separated_in_edit_history(cx: &mut TestAppContext) {
    let (ep_store, _requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());

    // Create a file with 30 lines to test line-based coalescing
    let content = (1..=30)
        .map(|i| format!("Line {}\n", i))
        .collect::<String>();
    fs.insert_tree(
        "/root",
        json!({
            "foo.md": content
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();

    ep_store.update(cx, |ep_store, cx| {
        ep_store.register_buffer(&buffer, &project, cx);
    });

    // First edit: multi-line edit spanning rows 10-12 (replacing lines 11-13)
    buffer.update(cx, |buffer, cx| {
        let start = Point::new(10, 0).to_offset(buffer);
        let end = Point::new(13, 0).to_offset(buffer);
        buffer.edit(vec![(start..end, "Middle A\nMiddle B\n")], None, cx);
    });

    let events = ep_store.update(cx, |ep_store, cx| {
        ep_store.edit_history_for_project(&project, cx)
    });
    assert_eq!(
        render_events(&events),
        indoc! {"
            @@ -8,9 +8,8 @@
             Line 8
             Line 9
             Line 10
            -Line 11
            -Line 12
            -Line 13
            +Middle A
            +Middle B
             Line 14
             Line 15
             Line 16
        "},
        "After first edit"
    );

    // Second edit: insert ABOVE the first edit's range (row 5, within 8 lines of row 10)
    // This tests that coalescing considers the START of the existing range
    buffer.update(cx, |buffer, cx| {
        let offset = Point::new(5, 0).to_offset(buffer);
        buffer.edit(vec![(offset..offset, "Above\n")], None, cx);
    });

    let events = ep_store.update(cx, |ep_store, cx| {
        ep_store.edit_history_for_project(&project, cx)
    });
    assert_eq!(
        render_events(&events),
        indoc! {"
            @@ -3,14 +3,14 @@
             Line 3
             Line 4
             Line 5
            +Above
             Line 6
             Line 7
             Line 8
             Line 9
             Line 10
            -Line 11
            -Line 12
            -Line 13
            +Middle A
            +Middle B
             Line 14
             Line 15
             Line 16
        "},
        "After inserting above (should coalesce)"
    );

    // Third edit: insert BELOW the first edit's range (row 14 in current buffer, within 8 lines of row 12)
    // This tests that coalescing considers the END of the existing range
    buffer.update(cx, |buffer, cx| {
        let offset = Point::new(14, 0).to_offset(buffer);
        buffer.edit(vec![(offset..offset, "Below\n")], None, cx);
    });

    let events = ep_store.update(cx, |ep_store, cx| {
        ep_store.edit_history_for_project(&project, cx)
    });
    assert_eq!(
        render_events(&events),
        indoc! {"
            @@ -3,15 +3,16 @@
             Line 3
             Line 4
             Line 5
            +Above
             Line 6
             Line 7
             Line 8
             Line 9
             Line 10
            -Line 11
            -Line 12
            -Line 13
            +Middle A
            +Middle B
             Line 14
            +Below
             Line 15
             Line 16
             Line 17
        "},
        "After inserting below (should coalesce)"
    );

    // Fourth edit: insert FAR BELOW (row 25, beyond 8 lines from the current range end ~row 15)
    // This should NOT coalesce - creates a new event
    buffer.update(cx, |buffer, cx| {
        let offset = Point::new(25, 0).to_offset(buffer);
        buffer.edit(vec![(offset..offset, "Far below\n")], None, cx);
    });

    let events = ep_store.update(cx, |ep_store, cx| {
        ep_store.edit_history_for_project(&project, cx)
    });
    assert_eq!(
        render_events(&events),
        indoc! {"
            @@ -3,15 +3,16 @@
             Line 3
             Line 4
             Line 5
            +Above
             Line 6
             Line 7
             Line 8
             Line 9
             Line 10
            -Line 11
            -Line 12
            -Line 13
            +Middle A
            +Middle B
             Line 14
            +Below
             Line 15
             Line 16
             Line 17

            ---
            @@ -23,6 +23,7 @@
             Line 22
             Line 23
             Line 24
            +Far below
             Line 25
             Line 26
             Line 27
        "},
        "After inserting far below (should NOT coalesce)"
    );
}
756
757fn render_events(events: &[StoredEvent]) -> String {
758 events
759 .iter()
760 .map(|e| {
761 let zeta_prompt::Event::BufferChange { diff, .. } = e.event.as_ref();
762 diff.as_str()
763 })
764 .collect::<Vec<_>>()
765 .join("\n---\n")
766}
767
768fn render_events_with_predicted(events: &[StoredEvent]) -> Vec<String> {
769 events
770 .iter()
771 .map(|e| {
772 let zeta_prompt::Event::BufferChange {
773 diff, predicted, ..
774 } = e.event.as_ref();
775 let prefix = if *predicted { "predicted" } else { "manual" };
776 format!("{}\n{}", prefix, diff)
777 })
778 .collect()
779}
780
781fn make_collaborator_replica(
782 buffer: &Entity<Buffer>,
783 cx: &mut TestAppContext,
784) -> (Entity<Buffer>, clock::Global) {
785 let (state, version) =
786 buffer.read_with(cx, |buffer, _cx| (buffer.to_proto(_cx), buffer.version()));
787 let collaborator = cx.new(|_cx| {
788 Buffer::from_proto(ReplicaId::new(1), Capability::ReadWrite, state, None).unwrap()
789 });
790 (collaborator, version)
791}
792
793async fn apply_collaborator_edit(
794 collaborator: &Entity<Buffer>,
795 buffer: &Entity<Buffer>,
796 since_version: &mut clock::Global,
797 edit_range: Range<usize>,
798 new_text: &str,
799 cx: &mut TestAppContext,
800) {
801 collaborator.update(cx, |collaborator, cx| {
802 collaborator.edit([(edit_range, new_text)], None, cx);
803 });
804
805 let serialize_task = collaborator.read_with(cx, |collaborator, cx| {
806 collaborator.serialize_ops(Some(since_version.clone()), cx)
807 });
808 let ops = serialize_task.await;
809 *since_version = collaborator.read_with(cx, |collaborator, _cx| collaborator.version());
810
811 buffer.update(cx, |buffer, cx| {
812 buffer.apply_ops(
813 ops.into_iter()
814 .map(|op| language::proto::deserialize_operation(op).unwrap()),
815 cx,
816 );
817 });
818}
819
/// A collaborator edit adjacent to the user's cursor is merged into the same
/// edit-history event as the user's own edit, and the combined event is still
/// labelled "manual" (not "predicted").
#[gpui::test]
async fn test_nearby_collaborator_edits_are_kept_in_history(cx: &mut TestAppContext) {
    let (ep_store, _requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.rs": "line 0\nline 1\nline 2\nline 3\nline 4\nline 5\nline 6\nline 7\nline 8\nline 9\nline 10\nline 11\nline 12\nline 13\nline 14\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.rs"), cx).unwrap();
            project.set_active_path(Some(path.clone()), cx);
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();

    let cursor = buffer.read_with(cx, |buffer, _cx| buffer.anchor_before(Point::new(1, 0)));

    // NOTE(review): the `prediction_at` call with an explicit cursor appears
    // to seed the position used for the nearby/distant check — confirm.
    ep_store.update(cx, |ep_store, cx| {
        ep_store.register_buffer(&buffer, &project, cx);
        let _ = ep_store.prediction_at(&buffer, Some(cursor), &project, cx);
    });

    // Local edit on line 0.
    buffer.update(cx, |buffer, cx| {
        buffer.edit(vec![(0..6, "LOCAL ZERO")], None, cx);
    });

    let (collaborator, mut collaborator_version) = make_collaborator_replica(&buffer, cx);

    let (line_one_start, line_one_len) = collaborator.read_with(cx, |buffer, _cx| {
        (Point::new(1, 0).to_offset(buffer), buffer.line_len(1))
    });

    // Remote edit on line 1 — right next to the cursor, so it is kept.
    apply_collaborator_edit(
        &collaborator,
        &buffer,
        &mut collaborator_version,
        line_one_start..line_one_start + line_one_len as usize,
        "REMOTE ONE",
        cx,
    )
    .await;

    let events = ep_store.update(cx, |ep_store, cx| {
        ep_store.edit_history_for_project(&project, cx)
    });

    // One coalesced "manual" event containing both the local and remote edits.
    assert_eq!(
        render_events_with_predicted(&events),
        vec![indoc! {"
            manual
            @@ -1,5 +1,5 @@
            -line 0
            -line 1
            +LOCAL ZERO
            +REMOTE ONE
             line 2
             line 3
             line 4
        "}]
    );
}
888
/// A collaborator edit hundreds of lines away from the user's cursor is
/// dropped from the edit history; only the user's local edit remains.
#[gpui::test]
async fn test_distant_collaborator_edits_are_omitted_from_history(cx: &mut TestAppContext) {
    let (ep_store, _requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.rs": (0..1000)
                .map(|i| format!("line {i}\n"))
                .collect::<String>()
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.rs"), cx).unwrap();
            project.set_active_path(Some(path.clone()), cx);
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();

    let cursor = buffer.read_with(cx, |buffer, _cx| buffer.anchor_before(Point::new(1, 0)));

    // NOTE(review): the `prediction_at` call with an explicit cursor appears
    // to seed the position used for the nearby/distant check — confirm.
    ep_store.update(cx, |ep_store, cx| {
        ep_store.register_buffer(&buffer, &project, cx);
        let _ = ep_store.prediction_at(&buffer, Some(cursor), &project, cx);
    });

    // Local edit on line 0, near the cursor.
    buffer.update(cx, |buffer, cx| {
        buffer.edit(vec![(0..6, "LOCAL ZERO")], None, cx);
    });

    let (collaborator, mut collaborator_version) = make_collaborator_replica(&buffer, cx);

    // Remote edit on line 900 — far from the cursor on line 1.
    let far_line_start = buffer.read_with(cx, |buffer, _cx| Point::new(900, 0).to_offset(buffer));

    apply_collaborator_edit(
        &collaborator,
        &buffer,
        &mut collaborator_version,
        far_line_start..far_line_start + 7,
        "REMOTE FAR",
        cx,
    )
    .await;

    let events = ep_store.update(cx, |ep_store, cx| {
        ep_store.edit_history_for_project(&project, cx)
    });

    // Only the local edit is recorded; the distant remote edit is omitted.
    assert_eq!(
        render_events_with_predicted(&events),
        vec![indoc! {"
            manual
            @@ -1,4 +1,4 @@
            -line 0
            +LOCAL ZERO
             line 1
             line 2
             line 3
        "}]
    );
}
955
956#[gpui::test]
957async fn test_irrelevant_collaborator_edits_in_different_files_are_omitted_from_history(
958 cx: &mut TestAppContext,
959) {
960 let (ep_store, _requests) = init_test_with_fake_client(cx);
961 let fs = FakeFs::new(cx.executor());
962 fs.insert_tree(
963 "/root",
964 json!({
965 "foo.rs": "line 0\nline 1\nline 2\nline 3\n",
966 "bar.rs": "line 0\nline 1\nline 2\nline 3\n"
967 }),
968 )
969 .await;
970 let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;
971
972 let foo_buffer = project
973 .update(cx, |project, cx| {
974 let path = project.find_project_path(path!("root/foo.rs"), cx).unwrap();
975 project.set_active_path(Some(path.clone()), cx);
976 project.open_buffer(path, cx)
977 })
978 .await
979 .unwrap();
980 let bar_buffer = project
981 .update(cx, |project, cx| {
982 let path = project.find_project_path(path!("root/bar.rs"), cx).unwrap();
983 project.open_buffer(path, cx)
984 })
985 .await
986 .unwrap();
987
988 let foo_cursor = foo_buffer.read_with(cx, |buffer, _cx| buffer.anchor_before(Point::new(1, 0)));
989
990 ep_store.update(cx, |ep_store, cx| {
991 ep_store.register_buffer(&foo_buffer, &project, cx);
992 ep_store.register_buffer(&bar_buffer, &project, cx);
993 let _ = ep_store.prediction_at(&foo_buffer, Some(foo_cursor), &project, cx);
994 });
995
996 let (bar_collaborator, mut bar_version) = make_collaborator_replica(&bar_buffer, cx);
997
998 apply_collaborator_edit(
999 &bar_collaborator,
1000 &bar_buffer,
1001 &mut bar_version,
1002 0..6,
1003 "REMOTE BAR",
1004 cx,
1005 )
1006 .await;
1007
1008 let events = ep_store.update(cx, |ep_store, cx| {
1009 ep_store.edit_history_for_project(&project, cx)
1010 });
1011
1012 assert!(events.is_empty());
1013}
1014
/// An edit whose diff exceeds `EDIT_HISTORY_DIFF_SIZE_LIMIT` is dropped from
/// the edit history, and local edits on either side of it are recorded as two
/// separate events rather than being merged across the oversized edit.
#[gpui::test]
async fn test_large_edits_are_omitted_from_history(cx: &mut TestAppContext) {
    let (ep_store, _requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.rs": (0..20)
                .map(|i| format!("line {i}\n"))
                .collect::<String>()
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.rs"), cx).unwrap();
            project.set_active_path(Some(path.clone()), cx);
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();

    let cursor = buffer.read_with(cx, |buffer, _cx| buffer.anchor_before(Point::new(1, 0)));

    ep_store.update(cx, |ep_store, cx| {
        ep_store.register_buffer(&buffer, &project, cx);
        let _ = ep_store.prediction_at(&buffer, Some(cursor), &project, cx);
    });

    // First local edit, on line 0.
    buffer.update(cx, |buffer, cx| {
        buffer.edit(vec![(0..6, "LOCAL ZERO")], None, cx);
    });

    let (collaborator, mut collaborator_version) = make_collaborator_replica(&buffer, cx);

    let (line_three_start, line_three_len) = collaborator.read_with(cx, |buffer, _cx| {
        (Point::new(3, 0).to_offset(buffer), buffer.line_len(3))
    });
    // One byte over the limit, so its diff must be discarded.
    let large_edit = "X".repeat(EDIT_HISTORY_DIFF_SIZE_LIMIT + 1);

    // Oversized remote edit on line 3, between the two local edits.
    apply_collaborator_edit(
        &collaborator,
        &buffer,
        &mut collaborator_version,
        line_three_start..line_three_start + line_three_len as usize,
        &large_edit,
        cx,
    )
    .await;

    // Second local edit, on line 7.
    buffer.update(cx, |buffer, cx| {
        let line_seven_start = Point::new(7, 0).to_offset(buffer);
        let line_seven_end = Point::new(7, 6).to_offset(buffer);
        buffer.edit(
            vec![(line_seven_start..line_seven_end, "LOCAL SEVEN")],
            None,
            cx,
        );
    });

    let events = ep_store.update(cx, |ep_store, cx| {
        ep_store.edit_history_for_project(&project, cx)
    });

    let rendered_events = render_events_with_predicted(&events);

    // Two events (one per local edit), neither containing the oversized text.
    assert_eq!(rendered_events.len(), 2);
    assert!(rendered_events[0].contains("+LOCAL ZERO"));
    assert!(!rendered_events[0].contains(&large_edit));
    assert!(rendered_events[1].contains("+LOCAL SEVEN"));
    assert!(!rendered_events[1].contains(&large_edit));
}
1089
1090#[gpui::test]
1091async fn test_predicted_flag_coalescing(cx: &mut TestAppContext) {
1092 let (ep_store, _requests) = init_test_with_fake_client(cx);
1093 let fs = FakeFs::new(cx.executor());
1094 fs.insert_tree(
1095 "/root",
1096 json!({
1097 "foo.rs": "line 0\nline 1\nline 2\nline 3\nline 4\nline 5\nline 6\nline 7\nline 8\nline 9\nline 10\nline 11\nline 12\nline 13\nline 14\n"
1098 }),
1099 )
1100 .await;
1101 let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;
1102
1103 let buffer = project
1104 .update(cx, |project, cx| {
1105 let path = project.find_project_path(path!("root/foo.rs"), cx).unwrap();
1106 project.open_buffer(path, cx)
1107 })
1108 .await
1109 .unwrap();
1110
1111 ep_store.update(cx, |ep_store, cx| {
1112 ep_store.register_buffer(&buffer, &project, cx);
1113 });
1114
1115 // Case 1: Manual edits have `predicted` set to false.
1116 buffer.update(cx, |buffer, cx| {
1117 buffer.edit(vec![(0..6, "LINE ZERO")], None, cx);
1118 });
1119
1120 let events = ep_store.update(cx, |ep_store, cx| {
1121 ep_store.edit_history_for_project(&project, cx)
1122 });
1123
1124 assert_eq!(
1125 render_events_with_predicted(&events),
1126 vec![indoc! {"
1127 manual
1128 @@ -1,4 +1,4 @@
1129 -line 0
1130 +LINE ZERO
1131 line 1
1132 line 2
1133 line 3
1134 "}]
1135 );
1136
1137 // Case 2: Multiple successive manual edits near each other are merged into one
1138 // event with `predicted` set to false.
1139 buffer.update(cx, |buffer, cx| {
1140 let offset = Point::new(1, 0).to_offset(buffer);
1141 let end = Point::new(1, 6).to_offset(buffer);
1142 buffer.edit(vec![(offset..end, "LINE ONE")], None, cx);
1143 });
1144
1145 let events = ep_store.update(cx, |ep_store, cx| {
1146 ep_store.edit_history_for_project(&project, cx)
1147 });
1148 assert_eq!(
1149 render_events_with_predicted(&events),
1150 vec![indoc! {"
1151 manual
1152 @@ -1,5 +1,5 @@
1153 -line 0
1154 -line 1
1155 +LINE ZERO
1156 +LINE ONE
1157 line 2
1158 line 3
1159 line 4
1160 "}]
1161 );
1162
1163 // Case 3: Accepted predictions have `predicted` set to true.
1164 // Case 5: A manual edit that follows a predicted edit is not merged with the
1165 // predicted edit, even if it is nearby.
1166 ep_store.update(cx, |ep_store, cx| {
1167 buffer.update(cx, |buffer, cx| {
1168 let offset = Point::new(2, 0).to_offset(buffer);
1169 let end = Point::new(2, 6).to_offset(buffer);
1170 buffer.edit(vec![(offset..end, "LINE TWO")], None, cx);
1171 });
1172 ep_store.report_changes_for_buffer(&buffer, &project, true, true, cx);
1173 });
1174
1175 let events = ep_store.update(cx, |ep_store, cx| {
1176 ep_store.edit_history_for_project(&project, cx)
1177 });
1178 assert_eq!(
1179 render_events_with_predicted(&events),
1180 vec![
1181 indoc! {"
1182 manual
1183 @@ -1,5 +1,5 @@
1184 -line 0
1185 -line 1
1186 +LINE ZERO
1187 +LINE ONE
1188 line 2
1189 line 3
1190 line 4
1191 "},
1192 indoc! {"
1193 predicted
1194 @@ -1,6 +1,6 @@
1195 LINE ZERO
1196 LINE ONE
1197 -line 2
1198 +LINE TWO
1199 line 3
1200 line 4
1201 line 5
1202 "}
1203 ]
1204 );
1205
1206 // Case 4: Multiple successive accepted predictions near each other are merged
1207 // into one event with `predicted` set to true.
1208 ep_store.update(cx, |ep_store, cx| {
1209 buffer.update(cx, |buffer, cx| {
1210 let offset = Point::new(3, 0).to_offset(buffer);
1211 let end = Point::new(3, 6).to_offset(buffer);
1212 buffer.edit(vec![(offset..end, "LINE THREE")], None, cx);
1213 });
1214 ep_store.report_changes_for_buffer(&buffer, &project, true, true, cx);
1215 });
1216
1217 let events = ep_store.update(cx, |ep_store, cx| {
1218 ep_store.edit_history_for_project(&project, cx)
1219 });
1220 assert_eq!(
1221 render_events_with_predicted(&events),
1222 vec![
1223 indoc! {"
1224 manual
1225 @@ -1,5 +1,5 @@
1226 -line 0
1227 -line 1
1228 +LINE ZERO
1229 +LINE ONE
1230 line 2
1231 line 3
1232 line 4
1233 "},
1234 indoc! {"
1235 predicted
1236 @@ -1,7 +1,7 @@
1237 LINE ZERO
1238 LINE ONE
1239 -line 2
1240 -line 3
1241 +LINE TWO
1242 +LINE THREE
1243 line 4
1244 line 5
1245 line 6
1246 "}
1247 ]
1248 );
1249
1250 // Case 5 (continued): A manual edit that follows a predicted edit is not merged
1251 // with the predicted edit, even if it is nearby.
1252 buffer.update(cx, |buffer, cx| {
1253 let offset = Point::new(4, 0).to_offset(buffer);
1254 let end = Point::new(4, 6).to_offset(buffer);
1255 buffer.edit(vec![(offset..end, "LINE FOUR")], None, cx);
1256 });
1257
1258 let events = ep_store.update(cx, |ep_store, cx| {
1259 ep_store.edit_history_for_project(&project, cx)
1260 });
1261 assert_eq!(
1262 render_events_with_predicted(&events),
1263 vec![
1264 indoc! {"
1265 manual
1266 @@ -1,5 +1,5 @@
1267 -line 0
1268 -line 1
1269 +LINE ZERO
1270 +LINE ONE
1271 line 2
1272 line 3
1273 line 4
1274 "},
1275 indoc! {"
1276 predicted
1277 @@ -1,7 +1,7 @@
1278 LINE ZERO
1279 LINE ONE
1280 -line 2
1281 -line 3
1282 +LINE TWO
1283 +LINE THREE
1284 line 4
1285 line 5
1286 line 6
1287 "},
1288 indoc! {"
1289 manual
1290 @@ -2,7 +2,7 @@
1291 LINE ONE
1292 LINE TWO
1293 LINE THREE
1294 -line 4
1295 +LINE FOUR
1296 line 5
1297 line 6
1298 line 7
1299 "}
1300 ]
1301 );
1302
1303 // Case 6: If we then perform a manual edit at a *different* location (more than
1304 // 8 lines away), then the edits at the prior location can be merged with each
1305 // other, even if some are predicted and some are not. `predicted` means all
1306 // constituent edits were predicted.
1307 buffer.update(cx, |buffer, cx| {
1308 let offset = Point::new(14, 0).to_offset(buffer);
1309 let end = Point::new(14, 7).to_offset(buffer);
1310 buffer.edit(vec![(offset..end, "LINE FOURTEEN")], None, cx);
1311 });
1312
1313 let events = ep_store.update(cx, |ep_store, cx| {
1314 ep_store.edit_history_for_project(&project, cx)
1315 });
1316 assert_eq!(
1317 render_events_with_predicted(&events),
1318 vec![
1319 indoc! {"
1320 manual
1321 @@ -1,8 +1,8 @@
1322 -line 0
1323 -line 1
1324 -line 2
1325 -line 3
1326 -line 4
1327 +LINE ZERO
1328 +LINE ONE
1329 +LINE TWO
1330 +LINE THREE
1331 +LINE FOUR
1332 line 5
1333 line 6
1334 line 7
1335 "},
1336 indoc! {"
1337 manual
1338 @@ -12,4 +12,4 @@
1339 line 11
1340 line 12
1341 line 13
1342 -line 14
1343 +LINE FOURTEEN
1344 "}
1345 ]
1346 );
1347}
1348
// An empty completion from the model must produce no visible prediction, and the
// request must be auto-reported to the server as rejected with reason `Empty`.
#[gpui::test]
async fn test_empty_prediction(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.md": "Hello!\nHow\nBye\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();
    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
    let position = snapshot.anchor_before(language::Point::new(1, 3));

    // Kick off a prediction request at the cursor position.
    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    // Respond with an empty diff, i.e. the model predicted no edits.
    let (request, respond_tx) = requests.predict.next().await.unwrap();
    let mut response = model_response(&request, "");
    response.model_version = Some("zeta2:test-empty".to_string());
    let id = response.request_id.clone();
    respond_tx.send(response).unwrap();

    cx.run_until_parked();

    // No prediction should be surfaced for the buffer.
    ep_store.update(cx, |ep_store, cx| {
        assert!(
            ep_store
                .prediction_at(&buffer, None, &project, cx)
                .is_none()
        );
    });

    // prediction is reported as rejected
    let (reject_request, _) = requests.reject.next().await.unwrap();

    assert_eq!(
        &reject_request.rejections,
        &[EditPredictionRejection {
            request_id: id,
            reason: EditPredictionRejectReason::Empty,
            was_shown: false,
            model_version: Some("zeta2:test-empty".to_string()),
            e2e_latency_ms: Some(0),
        }]
    );
}
1406
// If the buffer is edited (to the predicted content) while the request is in
// flight, interpolating the response against the new buffer yields no remaining
// edits; the prediction is dropped and reported as `InterpolatedEmpty`.
#[gpui::test]
async fn test_interpolated_empty(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.md": "Hello!\nHow\nBye\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();
    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
    let position = snapshot.anchor_before(language::Point::new(1, 3));

    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    let (request, respond_tx) = requests.predict.next().await.unwrap();

    // Apply the very edit the model is about to predict, before the response
    // arrives, so the interpolated prediction becomes empty.
    buffer.update(cx, |buffer, cx| {
        buffer.set_text("Hello!\nHow are you?\nBye", cx);
    });

    let mut response = model_response(&request, SIMPLE_DIFF);
    response.model_version = Some("zeta2:test-interpolated-empty".to_string());
    let id = response.request_id.clone();
    respond_tx.send(response).unwrap();

    cx.run_until_parked();

    // Nothing is surfaced: the prediction no longer applies to the buffer.
    ep_store.update(cx, |ep_store, cx| {
        assert!(
            ep_store
                .prediction_at(&buffer, None, &project, cx)
                .is_none()
        );
    });

    // prediction is reported as rejected
    let (reject_request, _) = requests.reject.next().await.unwrap();

    assert_eq!(
        &reject_request.rejections,
        &[EditPredictionRejection {
            request_id: id,
            reason: EditPredictionRejectReason::InterpolatedEmpty,
            was_shown: false,
            model_version: Some("zeta2:test-interpolated-empty".to_string()),
            e2e_latency_ms: Some(0),
        }]
    );
}
1469
// Canonical unified diff shared by several tests below: rewrites line 2 of
// `root/foo.md` from "How" to "How are you?".
const SIMPLE_DIFF: &str = indoc! { r"
    --- a/root/foo.md
    +++ b/root/foo.md
    @@ ... @@
    Hello!
    -How
    +How are you?
    Bye
"};
1479
// A newer completed prediction of equal quality replaces the current one, and the
// displaced prediction is reported as rejected with reason `Replaced`.
#[gpui::test]
async fn test_replace_current(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.md": "Hello!\nHow\nBye\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();
    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
    let position = snapshot.anchor_before(language::Point::new(1, 3));

    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    let (request, respond_tx) = requests.predict.next().await.unwrap();
    let first_response = model_response(&request, SIMPLE_DIFF);
    let first_id = first_response.request_id.clone();
    respond_tx.send(first_response).unwrap();

    cx.run_until_parked();

    // First response becomes the current prediction.
    ep_store.update(cx, |ep_store, cx| {
        assert_eq!(
            ep_store
                .prediction_at(&buffer, None, &project, cx)
                .unwrap()
                .id
                .0,
            first_id
        );
    });

    // a second request is triggered
    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    let (request, respond_tx) = requests.predict.next().await.unwrap();
    let second_response = model_response(&request, SIMPLE_DIFF);
    let second_id = second_response.request_id.clone();
    respond_tx.send(second_response).unwrap();

    cx.run_until_parked();

    ep_store.update(cx, |ep_store, cx| {
        // second replaces first
        assert_eq!(
            ep_store
                .prediction_at(&buffer, None, &project, cx)
                .unwrap()
                .id
                .0,
            second_id
        );
    });

    // first is reported as replaced
    let (reject_request, _) = requests.reject.next().await.unwrap();

    assert_eq!(
        &reject_request.rejections,
        &[EditPredictionRejection {
            request_id: first_id,
            reason: EditPredictionRejectReason::Replaced,
            was_shown: false,
            model_version: None,
            e2e_latency_ms: Some(0),
        }]
    );
}
1563
// If a newer prediction is judged worse than the current one, the current
// prediction is kept and the newer one is rejected with `CurrentPreferred`.
#[gpui::test]
async fn test_current_preferred(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.md": "Hello!\nHow\nBye\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();
    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
    let position = snapshot.anchor_before(language::Point::new(1, 3));

    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    let (request, respond_tx) = requests.predict.next().await.unwrap();
    let first_response = model_response(&request, SIMPLE_DIFF);
    let first_id = first_response.request_id.clone();
    respond_tx.send(first_response).unwrap();

    cx.run_until_parked();

    // First response becomes the current prediction.
    ep_store.update(cx, |ep_store, cx| {
        assert_eq!(
            ep_store
                .prediction_at(&buffer, None, &project, cx)
                .unwrap()
                .id
                .0,
            first_id
        );
    });

    // a second request is triggered
    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    let (request, respond_tx) = requests.predict.next().await.unwrap();
    // worse than current prediction
    let mut second_response = model_response(
        &request,
        indoc! { r"
            --- a/root/foo.md
            +++ b/root/foo.md
            @@ ... @@
            Hello!
            -How
            +How are
            Bye
        "},
    );
    second_response.model_version = Some("zeta2:test-current-preferred".to_string());
    let second_id = second_response.request_id.clone();
    respond_tx.send(second_response).unwrap();

    cx.run_until_parked();

    ep_store.update(cx, |ep_store, cx| {
        // first is preferred over second
        assert_eq!(
            ep_store
                .prediction_at(&buffer, None, &project, cx)
                .unwrap()
                .id
                .0,
            first_id
        );
    });

    // second is reported as rejected
    let (reject_request, _) = requests.reject.next().await.unwrap();

    assert_eq!(
        &reject_request.rejections,
        &[EditPredictionRejection {
            request_id: second_id,
            reason: EditPredictionRejectReason::CurrentPreferred,
            was_shown: false,
            model_version: Some("zeta2:test-current-preferred".to_string()),
            e2e_latency_ms: Some(0),
        }]
    );
}
1660
// When a later pending request completes before an earlier one, the earlier
// request is cancelled: its late response must not replace the current
// prediction, and it is reported as rejected with `Canceled`.
#[gpui::test]
async fn test_cancel_earlier_pending_requests(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.md": "Hello!\nHow\nBye\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();
    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
    let position = snapshot.anchor_before(language::Point::new(1, 3));

    // start two refresh tasks
    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    let (request1, respond_first) = requests.predict.next().await.unwrap();

    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    let (request, respond_second) = requests.predict.next().await.unwrap();

    // wait for throttle
    cx.run_until_parked();

    // second responds first
    let second_response = model_response(&request, SIMPLE_DIFF);
    let second_id = second_response.request_id.clone();
    respond_second.send(second_response).unwrap();

    cx.run_until_parked();

    ep_store.update(cx, |ep_store, cx| {
        // current prediction is second
        assert_eq!(
            ep_store
                .prediction_at(&buffer, None, &project, cx)
                .unwrap()
                .id
                .0,
            second_id
        );
    });

    // Now let the first (stale, cancelled) request respond.
    let mut first_response = model_response(&request1, SIMPLE_DIFF);
    first_response.model_version = Some("zeta2:test-canceled".to_string());
    let first_id = first_response.request_id.clone();
    respond_first.send(first_response).unwrap();

    cx.run_until_parked();

    ep_store.update(cx, |ep_store, cx| {
        // current prediction is still second, since first was cancelled
        assert_eq!(
            ep_store
                .prediction_at(&buffer, None, &project, cx)
                .unwrap()
                .id
                .0,
            second_id
        );
    });

    // first is reported as rejected
    let (reject_request, _) = requests.reject.next().await.unwrap();

    cx.run_until_parked();

    assert_eq!(
        &reject_request.rejections,
        &[EditPredictionRejection {
            request_id: first_id,
            reason: EditPredictionRejectReason::Canceled,
            was_shown: false,
            model_version: Some("zeta2:test-canceled".to_string()),
            e2e_latency_ms: None,
        }]
    );
}
1754
// With two requests already pending, starting a third cancels the second (the
// middle one). The first may still complete and show; the third then replaces
// it. The rejection report contains both the cancellation and the replacement.
#[gpui::test]
async fn test_cancel_second_on_third_request(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.md": "Hello!\nHow\nBye\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();
    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
    let position = snapshot.anchor_before(language::Point::new(1, 3));

    // start two refresh tasks
    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    let (request1, respond_first) = requests.predict.next().await.unwrap();

    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    let (request2, respond_second) = requests.predict.next().await.unwrap();

    // wait for throttle, so requests are sent
    cx.run_until_parked();

    ep_store.update(cx, |ep_store, cx| {
        // start a third request
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);

        // 2 are pending, so 2nd is cancelled
        assert_eq!(
            ep_store
                .get_or_init_project(&project, cx)
                .cancelled_predictions
                .iter()
                .copied()
                .collect::<Vec<_>>(),
            [1]
        );
    });

    // wait for throttle
    cx.run_until_parked();

    let (request3, respond_third) = requests.predict.next().await.unwrap();

    let first_response = model_response(&request1, SIMPLE_DIFF);
    let first_id = first_response.request_id.clone();
    respond_first.send(first_response).unwrap();

    cx.run_until_parked();

    ep_store.update(cx, |ep_store, cx| {
        // current prediction is first
        assert_eq!(
            ep_store
                .prediction_at(&buffer, None, &project, cx)
                .unwrap()
                .id
                .0,
            first_id
        );
    });

    // The cancelled second request responds late; its result must be ignored.
    let mut cancelled_response = model_response(&request2, SIMPLE_DIFF);
    cancelled_response.model_version = Some("zeta2:test-canceled-second".to_string());
    let cancelled_id = cancelled_response.request_id.clone();
    respond_second.send(cancelled_response).unwrap();

    cx.run_until_parked();

    ep_store.update(cx, |ep_store, cx| {
        // current prediction is still first, since second was cancelled
        assert_eq!(
            ep_store
                .prediction_at(&buffer, None, &project, cx)
                .unwrap()
                .id
                .0,
            first_id
        );
    });

    let third_response = model_response(&request3, SIMPLE_DIFF);
    let third_response_id = third_response.request_id.clone();
    respond_third.send(third_response).unwrap();

    cx.run_until_parked();

    ep_store.update(cx, |ep_store, cx| {
        // third completes and replaces first
        assert_eq!(
            ep_store
                .prediction_at(&buffer, None, &project, cx)
                .unwrap()
                .id
                .0,
            third_response_id
        );
    });

    // second is reported as rejected
    let (reject_request, _) = requests.reject.next().await.unwrap();

    cx.run_until_parked();

    assert_eq!(
        &reject_request.rejections,
        &[
            EditPredictionRejection {
                request_id: cancelled_id,
                reason: EditPredictionRejectReason::Canceled,
                was_shown: false,
                model_version: Some("zeta2:test-canceled-second".to_string()),
                e2e_latency_ms: None,
            },
            EditPredictionRejection {
                request_id: first_id,
                reason: EditPredictionRejectReason::Replaced,
                was_shown: false,
                model_version: None,
                // 2 throttle waits (for 2nd and 3rd requests) elapsed
                // between this request's start and response.
                e2e_latency_ms: Some(2 * EditPredictionStore::THROTTLE_TIMEOUT.as_millis()),
            }
        ]
    );
}
1897
// Edit-triggered and diagnostic-("jump")-triggered predictions each have their
// own throttle: the first of each kind is immediate, the second of each kind is
// throttled, and advancing the clock past THROTTLE_TIMEOUT releases both.
#[gpui::test]
async fn test_jump_and_edit_throttles_are_independent(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.md": "Hello!\nHow\nBye\n",
            "bar.md": "Hola!\nComo\nAdios\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
            project.set_active_path(Some(path.clone()), cx);
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();
    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
    let position = snapshot.anchor_before(language::Point::new(1, 3));

    ep_store.update(cx, |ep_store, cx| {
        ep_store.register_project(&project, cx);
        ep_store.register_buffer(&buffer, &project, cx);
    });

    // First edit request - no prior edit, so not throttled.
    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });
    let (_edit_request, edit_response_tx) = requests.predict.next().await.unwrap();
    edit_response_tx.send(empty_response()).unwrap();
    cx.run_until_parked();

    let diagnostic = lsp::Diagnostic {
        range: lsp::Range::new(lsp::Position::new(1, 1), lsp::Position::new(1, 5)),
        severity: Some(lsp::DiagnosticSeverity::ERROR),
        message: "Sentence is incomplete".to_string(),
        ..Default::default()
    };

    // First jump request triggered by diagnostic event on buffer - no prior jump, so not throttled (independent from edit).
    project.update(cx, |project, cx| {
        project.lsp_store().update(cx, |lsp_store, cx| {
            lsp_store
                .update_diagnostics(
                    LanguageServerId(0),
                    lsp::PublishDiagnosticsParams {
                        uri: lsp::Uri::from_file_path(path!("/root/bar.md")).unwrap(),
                        diagnostics: vec![diagnostic],
                        version: None,
                    },
                    None,
                    language::DiagnosticSourceKind::Pushed,
                    &[],
                    cx,
                )
                .unwrap();
        });
    });
    let (_jump_request, jump_response_tx) = requests.predict.next().await.unwrap();
    jump_response_tx.send(empty_response()).unwrap();
    cx.run_until_parked();

    // Second edit request - should be throttled by the first edit.
    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });
    assert_no_predict_request_ready(&mut requests.predict);

    // Second jump request - should be throttled by the first jump.
    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_diagnostics(
            project.clone(),
            DiagnosticSearchScope::Global,
            cx,
        );
    });
    assert_no_predict_request_ready(&mut requests.predict);

    // Wait for both throttles to expire.
    cx.background_executor
        .advance_clock(EditPredictionStore::THROTTLE_TIMEOUT);
    cx.background_executor.run_until_parked();
    cx.run_until_parked();

    // Both requests should now go through.
    let (_request_1, response_tx_1) = requests.predict.next().await.unwrap();
    response_tx_1.send(empty_response()).unwrap();
    cx.run_until_parked();

    let (_request_2, response_tx_2) = requests.predict.next().await.unwrap();
    response_tx_2.send(empty_response()).unwrap();
    cx.run_until_parked();
}
1998
// Two identical refresh calls made in the same synchronous frame must collapse
// into a single outgoing prediction request.
#[gpui::test]
async fn test_same_frame_duplicate_requests_deduplicated(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "foo.md": "Hello!\nHow\nBye\n"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();
    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
    let position = snapshot.anchor_before(language::Point::new(1, 3));

    // Enqueue two refresh calls in the same synchronous frame (no yielding).
    // Both `cx.spawn` tasks are created before either executes, so they both
    // capture the same `proceed_count_at_enqueue`. Only the first task should
    // pass the deduplication gate; the second should be skipped.
    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    // Let both spawned tasks run to completion (including any throttle waits).
    cx.run_until_parked();

    // Exactly one prediction request should have been sent.
    let (request, respond_tx) = requests.predict.next().await.unwrap();
    respond_tx
        .send(model_response(&request, SIMPLE_DIFF))
        .unwrap();
    cx.run_until_parked();

    // No second request should be pending.
    assert_no_predict_request_ready(&mut requests.predict);
}
2044
// Covers the batching behavior of rejection reporting: debounced flushing,
// immediate flush when the batch-size limit is hit, and re-queueing of items
// whose send failed so they are retried with the next batch.
#[gpui::test]
async fn test_rejections_flushing(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);

    // Two rejections enqueued back-to-back should be sent in one request.
    ep_store.update(cx, |ep_store, cx| {
        ep_store.reject_prediction(
            EditPredictionId("test-1".into()),
            EditPredictionRejectReason::Discarded,
            false,
            None,
            None,
            cx,
        );
        ep_store.reject_prediction(
            EditPredictionId("test-2".into()),
            EditPredictionRejectReason::Canceled,
            true,
            None,
            None,
            cx,
        );
    });

    cx.executor().advance_clock(REJECT_REQUEST_DEBOUNCE);
    cx.run_until_parked();

    let (reject_request, respond_tx) = requests.reject.next().await.unwrap();
    respond_tx.send(()).unwrap();

    // batched
    assert_eq!(reject_request.rejections.len(), 2);
    assert_eq!(
        reject_request.rejections[0],
        EditPredictionRejection {
            request_id: "test-1".to_string(),
            reason: EditPredictionRejectReason::Discarded,
            was_shown: false,
            model_version: None,
            e2e_latency_ms: None
        }
    );
    assert_eq!(
        reject_request.rejections[1],
        EditPredictionRejection {
            request_id: "test-2".to_string(),
            reason: EditPredictionRejectReason::Canceled,
            was_shown: true,
            model_version: None,
            e2e_latency_ms: None
        }
    );

    // Reaching batch size limit sends without debounce
    ep_store.update(cx, |ep_store, cx| {
        for i in 0..70 {
            ep_store.reject_prediction(
                EditPredictionId(format!("batch-{}", i).into()),
                EditPredictionRejectReason::Discarded,
                false,
                None,
                None,
                cx,
            );
        }
    });

    // First MAX/2 items are sent immediately
    cx.run_until_parked();
    let (reject_request, respond_tx) = requests.reject.next().await.unwrap();
    respond_tx.send(()).unwrap();

    assert_eq!(reject_request.rejections.len(), 50);
    assert_eq!(reject_request.rejections[0].request_id, "batch-0");
    assert_eq!(reject_request.rejections[49].request_id, "batch-49");

    // Remaining items are debounced with the next batch
    cx.executor().advance_clock(Duration::from_secs(15));
    cx.run_until_parked();

    let (reject_request, respond_tx) = requests.reject.next().await.unwrap();
    respond_tx.send(()).unwrap();

    assert_eq!(reject_request.rejections.len(), 20);
    assert_eq!(reject_request.rejections[0].request_id, "batch-50");
    assert_eq!(reject_request.rejections[19].request_id, "batch-69");

    // Request failure
    ep_store.update(cx, |ep_store, cx| {
        ep_store.reject_prediction(
            EditPredictionId("retry-1".into()),
            EditPredictionRejectReason::Discarded,
            false,
            None,
            None,
            cx,
        );
    });

    cx.executor().advance_clock(REJECT_REQUEST_DEBOUNCE);
    cx.run_until_parked();

    let (reject_request, _respond_tx) = requests.reject.next().await.unwrap();
    assert_eq!(reject_request.rejections.len(), 1);
    assert_eq!(reject_request.rejections[0].request_id, "retry-1");
    // Simulate failure by dropping the responder without replying.
    drop(_respond_tx);

    // Add another rejection
    ep_store.update(cx, |ep_store, cx| {
        ep_store.reject_prediction(
            EditPredictionId("retry-2".into()),
            EditPredictionRejectReason::Discarded,
            false,
            None,
            None,
            cx,
        );
    });

    cx.executor().advance_clock(REJECT_REQUEST_DEBOUNCE);
    cx.run_until_parked();

    // Retry should include both the failed item and the new one
    let (reject_request, respond_tx) = requests.reject.next().await.unwrap();
    respond_tx.send(()).unwrap();

    assert_eq!(reject_request.rejections.len(), 2);
    assert_eq!(reject_request.rejections[0].request_id, "retry-1");
    assert_eq!(reject_request.rejections[1].request_id, "retry-2");
}
2175
// Exercises `zeta::active_buffer_diagnostics`: only diagnostics whose range
// falls inside the search range are returned, each with its snippet text,
// snippet row range, and the diagnostic's offset range within the snippet.
#[gpui::test]
fn test_active_buffer_diagnostics_fetching(cx: &mut TestAppContext) {
    // «…» marks diagnostic ranges; […] marks the search range.
    let diagnostic_marker: TextRangeMarker = ('«', '»').into();
    let search_range_marker: TextRangeMarker = ('[', ']').into();

    let (text, mut ranges) = marked_text_ranges_by(
        indoc! {r#"
            fn alpha() {
                let «first_value» = 1;
            }

            [fn beta() {
                let «second_value» = 2;
                let third_value = second_value + missing_symbol;
            }ˇ]

            fn gamma() {
                let «fourth_value» = missing_other_symbol;
            }
        "#},
        vec![diagnostic_marker.clone(), search_range_marker.clone()],
    );

    let diagnostic_ranges = ranges.remove(&diagnostic_marker).unwrap_or_default();
    let search_ranges = ranges.remove(&search_range_marker).unwrap_or_default();

    let buffer = cx.new(|cx| Buffer::local(&text, cx));

    // Attach one diagnostic per marked range, with varying severities so only
    // the expected ones survive filtering.
    buffer.update(cx, |buffer, cx| {
        let snapshot = buffer.snapshot();
        let diagnostics = DiagnosticSet::new(
            diagnostic_ranges
                .iter()
                .enumerate()
                .map(|(index, range)| DiagnosticEntry {
                    range: snapshot.offset_to_point_utf16(range.start)
                        ..snapshot.offset_to_point_utf16(range.end),
                    diagnostic: Diagnostic {
                        severity: match index {
                            0 => DiagnosticSeverity::WARNING,
                            1 => DiagnosticSeverity::ERROR,
                            _ => DiagnosticSeverity::HINT,
                        },
                        message: match index {
                            0 => "first warning".to_string(),
                            1 => "second error".to_string(),
                            _ => "third hint".to_string(),
                        },
                        group_id: index + 1,
                        is_primary: true,
                        source_kind: language::DiagnosticSourceKind::Pushed,
                        ..Diagnostic::default()
                    },
                }),
            &snapshot,
        );
        buffer.update_diagnostics(LanguageServerId(0), diagnostics, cx);
    });

    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
    let search_range = snapshot.offset_to_point(search_ranges[0].start)
        ..snapshot.offset_to_point(search_ranges[0].end);

    let active_buffer_diagnostics = zeta::active_buffer_diagnostics(&snapshot, search_range, 100);

    // Only the error inside the search range (`fn beta`) is returned.
    assert_eq!(
        active_buffer_diagnostics,
        vec![zeta_prompt::ActiveBufferDiagnostic {
            severity: Some(1),
            message: "second error".to_string(),
            snippet: text,
            snippet_buffer_row_range: 5..5,
            diagnostic_range_in_snippet: 61..73,
        }]
    );

    // Second fixture: diagnostics on rows 0, 2, and 4 of a five-line buffer.
    let buffer = cx.new(|cx| {
        Buffer::local(
            indoc! {"
                one
                two
                three
                four
                five
            "},
            cx,
        )
    });

    buffer.update(cx, |buffer, cx| {
        let snapshot = buffer.snapshot();
        let diagnostics = DiagnosticSet::new(
            vec![
                DiagnosticEntry {
                    range: text::PointUtf16::new(0, 0)..text::PointUtf16::new(0, 3),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "row zero".to_string(),
                        group_id: 1,
                        is_primary: true,
                        source_kind: language::DiagnosticSourceKind::Pushed,
                        ..Diagnostic::default()
                    },
                },
                DiagnosticEntry {
                    range: text::PointUtf16::new(2, 0)..text::PointUtf16::new(2, 5),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "row two".to_string(),
                        group_id: 2,
                        is_primary: true,
                        source_kind: language::DiagnosticSourceKind::Pushed,
                        ..Diagnostic::default()
                    },
                },
                DiagnosticEntry {
                    range: text::PointUtf16::new(4, 0)..text::PointUtf16::new(4, 4),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::INFORMATION,
                        message: "row four".to_string(),
                        group_id: 3,
                        is_primary: true,
                        source_kind: language::DiagnosticSourceKind::Pushed,
                        ..Diagnostic::default()
                    },
                },
            ],
            &snapshot,
        );
        buffer.update_diagnostics(LanguageServerId(0), diagnostics, cx);
    });

    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());

    // Search rows 2..4: "row zero" is excluded; "row two" and "row four" match.
    let active_buffer_diagnostics =
        zeta::active_buffer_diagnostics(&snapshot, Point::new(2, 0)..Point::new(4, 0), 100);

    assert_eq!(
        active_buffer_diagnostics
            .iter()
            .map(|diagnostic| (
                diagnostic.severity,
                diagnostic.message.clone(),
                diagnostic.snippet.clone(),
                diagnostic.snippet_buffer_row_range.clone(),
                diagnostic.diagnostic_range_in_snippet.clone(),
            ))
            .collect::<Vec<_>>(),
        vec![
            (
                Some(2),
                "row two".to_string(),
                "one\ntwo\nthree\nfour\nfive\n".to_string(),
                2..2,
                8..13,
            ),
            (
                Some(3),
                "row four".to_string(),
                "one\ntwo\nthree\nfour\nfive\n".to_string(),
                4..4,
                19..23,
            ),
        ]
    );
}
2342
2343// Generate a model response that would apply the given diff to the active file.
2344fn model_response(request: &PredictEditsV3Request, diff_to_apply: &str) -> PredictEditsV3Response {
2345 let editable_range =
2346 zeta_prompt::excerpt_range_for_format(Default::default(), &request.input.excerpt_ranges).1;
2347 let excerpt = request.input.cursor_excerpt[editable_range.clone()].to_string();
2348 let new_excerpt = apply_diff_to_string(diff_to_apply, &excerpt).unwrap();
2349
2350 PredictEditsV3Response {
2351 request_id: Uuid::new_v4().to_string(),
2352 editable_range,
2353 output: new_excerpt,
2354 model_version: None,
2355 }
2356}
2357
2358fn empty_response() -> PredictEditsV3Response {
2359 PredictEditsV3Response {
2360 request_id: Uuid::new_v4().to_string(),
2361 editable_range: 0..0,
2362 output: String::new(),
2363 model_version: None,
2364 }
2365}
2366
2367fn prompt_from_request(request: &PredictEditsV3Request) -> String {
2368 zeta_prompt::format_zeta_prompt(&request.input, zeta_prompt::ZetaFormat::default())
2369 .expect("default zeta prompt formatting should succeed in edit prediction tests")
2370}
2371
2372fn assert_no_predict_request_ready(
2373 requests: &mut mpsc::UnboundedReceiver<(
2374 PredictEditsV3Request,
2375 oneshot::Sender<PredictEditsV3Response>,
2376 )>,
2377) {
2378 if requests.next().now_or_never().flatten().is_some() {
2379 panic!("Unexpected prediction request while throttled.");
2380 }
2381}
2382
/// Receiving ends of the fake cloud endpoints created by
/// `init_test_with_fake_client`. Each item pairs a decoded request body with a
/// oneshot sender the test uses to deliver the HTTP response.
struct RequestChannels {
    // One item per POST to `/predict_edits/v3`.
    predict: mpsc::UnboundedReceiver<(
        PredictEditsV3Request,
        oneshot::Sender<PredictEditsV3Response>,
    )>,
    // One item per POST to `/predict_edits/reject`.
    reject: mpsc::UnboundedReceiver<(RejectEditPredictionsBody, oneshot::Sender<()>)>,
}
2390
/// Builds an `EditPredictionStore` backed by a fake HTTP client. Requests to
/// the predict/reject endpoints are forwarded over the returned
/// `RequestChannels`, letting the test inspect each request body and decide
/// when — and with what payload — to respond.
fn init_test_with_fake_client(
    cx: &mut TestAppContext,
) -> (Entity<EditPredictionStore>, RequestChannels) {
    cx.update(move |cx| {
        let settings_store = SettingsStore::test(cx);
        cx.set_global(settings_store);
        zlog::init_test();

        let (predict_req_tx, predict_req_rx) = mpsc::unbounded();
        let (reject_req_tx, reject_req_rx) = mpsc::unbounded();

        let http_client = FakeHttpClient::create({
            move |req| {
                let uri = req.uri().path().to_string();
                let mut body = req.into_body();
                let predict_req_tx = predict_req_tx.clone();
                let reject_req_tx = reject_req_tx.clone();
                async move {
                    let resp = match uri.as_str() {
                        // Token refresh: always succeed with a dummy token.
                        "/client/llm_tokens" => serde_json::to_string(&json!({
                            "token": "test"
                        }))
                        .unwrap(),
                        // Predict bodies are zstd-compressed JSON; decode them,
                        // hand the request to the test, and await its response.
                        "/predict_edits/v3" => {
                            let mut buf = Vec::new();
                            body.read_to_end(&mut buf).await.ok();
                            let decompressed = zstd::decode_all(&buf[..]).unwrap();
                            let req = serde_json::from_slice(&decompressed).unwrap();

                            let (res_tx, res_rx) = oneshot::channel();
                            predict_req_tx.unbounded_send((req, res_tx)).unwrap();
                            serde_json::to_string(&res_rx.await?).unwrap()
                        }
                        // Reject bodies are plain (uncompressed) JSON.
                        "/predict_edits/reject" => {
                            let mut buf = Vec::new();
                            body.read_to_end(&mut buf).await.ok();
                            let req = serde_json::from_slice(&buf).unwrap();

                            let (res_tx, res_rx) = oneshot::channel();
                            reject_req_tx.unbounded_send((req, res_tx)).unwrap();
                            serde_json::to_string(&res_rx.await?).unwrap()
                        }
                        _ => {
                            panic!("Unexpected path: {}", uri)
                        }
                    };

                    Ok(Response::builder().body(resp.into()).unwrap())
                }
            }
        });

        let client = client::Client::new(Arc::new(FakeSystemClock::new()), http_client, cx);
        client.cloud_client().set_credentials(1, "test".into());

        let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
        language_model::init(cx);
        RefreshLlmTokenListener::register(client.clone(), user_store.clone(), cx);
        let ep_store = EditPredictionStore::global(&client, &user_store, cx);

        (
            ep_store,
            RequestChannels {
                predict: predict_req_rx,
                reject: reject_req_rx,
            },
        )
    })
}
2460
/// Verifies that a pending prediction's edits are re-interpolated as the user
/// types: edits the user has already (partially) typed are shrunk or dropped,
/// and interpolation returns `None` once the buffer diverges from the
/// predicted text.
#[gpui::test]
async fn test_edit_prediction_basic_interpolation(cx: &mut TestAppContext) {
    let buffer = cx.new(|cx| Buffer::local("Lorem ipsum dolor", cx));
    // Prediction: replace "rem" (2..5) with "REM" and delete "su" (9..11).
    let edits: Arc<[(Range<Anchor>, Arc<str>)]> = cx.update(|cx| {
        to_completion_edits([(2..5, "REM".into()), (9..11, "".into())], &buffer, cx).into()
    });

    let edit_preview = cx
        .read(|cx| buffer.read(cx).preview_edits(edits.clone(), cx))
        .await;

    // Hand-assembled prediction; only `edits`, `buffer`, and `snapshot`
    // matter for interpolation — the prompt inputs are empty placeholders.
    let prediction = EditPrediction {
        edits,
        cursor_position: None,
        edit_preview,
        buffer: buffer.clone(),
        snapshot: cx.read(|cx| buffer.read(cx).snapshot()),
        id: EditPredictionId("the-id".into()),
        inputs: ZetaPromptInput {
            events: Default::default(),
            related_files: Default::default(),
            active_buffer_diagnostics: vec![],
            cursor_path: Path::new("").into(),
            cursor_excerpt: "".into(),
            cursor_offset_in_excerpt: 0,
            excerpt_start_row: None,
            excerpt_ranges: Default::default(),
            syntax_ranges: None,
            in_open_source_repo: false,
            can_collect_data: false,
            repo_url: None,
        },
        model_version: None,
    };

    cx.update(|cx| {
        // Unmodified buffer: interpolation returns the edits verbatim.
        assert_eq!(
            from_completion_edits(
                &prediction.interpolate(&buffer.read(cx).snapshot()).unwrap(),
                &buffer,
                cx
            ),
            vec![(2..5, "REM".into()), (9..11, "".into())]
        );

        // Deleting the to-be-replaced text turns the edit into a pure insertion.
        buffer.update(cx, |buffer, cx| buffer.edit([(2..5, "")], None, cx));
        assert_eq!(
            from_completion_edits(
                &prediction.interpolate(&buffer.read(cx).snapshot()).unwrap(),
                &buffer,
                cx
            ),
            vec![(2..2, "REM".into()), (6..8, "".into())]
        );

        // Undo restores the original interpolation.
        buffer.update(cx, |buffer, cx| buffer.undo(cx));
        assert_eq!(
            from_completion_edits(
                &prediction.interpolate(&buffer.read(cx).snapshot()).unwrap(),
                &buffer,
                cx
            ),
            vec![(2..5, "REM".into()), (9..11, "".into())]
        );

        // Typing a prefix of the replacement ("R") leaves only its suffix.
        buffer.update(cx, |buffer, cx| buffer.edit([(2..5, "R")], None, cx));
        assert_eq!(
            from_completion_edits(
                &prediction.interpolate(&buffer.read(cx).snapshot()).unwrap(),
                &buffer,
                cx
            ),
            vec![(3..3, "EM".into()), (7..9, "".into())]
        );

        buffer.update(cx, |buffer, cx| buffer.edit([(3..3, "E")], None, cx));
        assert_eq!(
            from_completion_edits(
                &prediction.interpolate(&buffer.read(cx).snapshot()).unwrap(),
                &buffer,
                cx
            ),
            vec![(4..4, "M".into()), (8..10, "".into())]
        );

        // Once "REM" has been fully typed, only the deletion remains.
        buffer.update(cx, |buffer, cx| buffer.edit([(4..4, "M")], None, cx));
        assert_eq!(
            from_completion_edits(
                &prediction.interpolate(&buffer.read(cx).snapshot()).unwrap(),
                &buffer,
                cx
            ),
            vec![(9..11, "".into())]
        );

        // Deleting the just-typed "M" re-introduces that insertion.
        buffer.update(cx, |buffer, cx| buffer.edit([(4..5, "")], None, cx));
        assert_eq!(
            from_completion_edits(
                &prediction.interpolate(&buffer.read(cx).snapshot()).unwrap(),
                &buffer,
                cx
            ),
            vec![(4..4, "M".into()), (8..10, "".into())]
        );

        buffer.update(cx, |buffer, cx| buffer.edit([(8..10, "")], None, cx));
        assert_eq!(
            from_completion_edits(
                &prediction.interpolate(&buffer.read(cx).snapshot()).unwrap(),
                &buffer,
                cx
            ),
            vec![(4..4, "M".into())]
        );

        // An edit that conflicts with the prediction invalidates it entirely.
        buffer.update(cx, |buffer, cx| buffer.edit([(4..6, "")], None, cx));
        assert_eq!(prediction.interpolate(&buffer.read(cx).snapshot()), None);
    })
}
2580
/// Applying a prediction through the full pipeline should yield exactly the
/// model's intended final text, even when the model's diff touches lines with
/// overlapping content.
#[gpui::test]
async fn test_clean_up_diff(cx: &mut TestAppContext) {
    init_test(cx);

    // Rename `word` -> `word_1` where the new name already appears nearby.
    assert_eq!(
        apply_edit_prediction(
            indoc! {"
                fn main() {
                    let word_1 = \"lorem\";
                    let range = word.len()..word.len();
                }
            "},
            indoc! {"
                fn main() {
                    let word_1 = \"lorem\";
                    let range = word_1.len()..word_1.len();
                }
            "},
            cx,
        )
        .await,
        indoc! {"
            fn main() {
                let word_1 = \"lorem\";
                let range = word_1.len()..word_1.len();
            }
        "},
    );

    // Extend a string literal and add a missing semicolon in one edit.
    assert_eq!(
        apply_edit_prediction(
            indoc! {"
                fn main() {
                    let story = \"the quick\"
                }
            "},
            indoc! {"
                fn main() {
                    let story = \"the quick brown fox jumps over the lazy dog\";
                }
            "},
            cx,
        )
        .await,
        indoc! {"
            fn main() {
                let story = \"the quick brown fox jumps over the lazy dog\";
            }
        "},
    );
}
2632
2633#[gpui::test]
2634async fn test_edit_prediction_end_of_buffer(cx: &mut TestAppContext) {
2635 init_test(cx);
2636
2637 let buffer_content = "lorem\n";
2638 let completion_response = "lorem\nipsum\n";
2639
2640 assert_eq!(
2641 apply_edit_prediction(buffer_content, completion_response, cx).await,
2642 "lorem\nipsum\n"
2643 );
2644}
2645
#[gpui::test]
async fn test_edit_prediction_no_spurious_trailing_newline(cx: &mut TestAppContext) {
    // Test that zeta2's newline normalization logic doesn't insert spurious newlines.
    // When the buffer ends without a trailing newline, but the model returns output
    // with a trailing newline, zeta2 should normalize both sides before diffing
    // so no spurious newline is inserted.
    let (ep_store, mut requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());

    // Single line buffer with no trailing newline
    fs.insert_tree(
        "/root",
        json!({
            "foo.txt": "hello"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project
                .find_project_path(path!("root/foo.txt"), cx)
                .unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();

    // Request a prediction with the cursor at the end of the single line.
    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
    let position = snapshot.anchor_before(language::Point::new(0, 5));

    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    let (request, respond_tx) = requests.predict.next().await.unwrap();

    // Model returns output WITH a trailing newline, even though the buffer doesn't have one.
    // Zeta2 should normalize both sides before diffing, so no spurious newline is inserted.
    let excerpt_length = request.input.cursor_excerpt.len();
    let response = PredictEditsV3Response {
        request_id: Uuid::new_v4().to_string(),
        output: "hello world\n".to_string(),
        editable_range: 0..excerpt_length,
        model_version: None,
    };
    respond_tx.send(response).unwrap();

    cx.run_until_parked();

    // The prediction should insert " world" without adding a newline
    ep_store.update(cx, |ep_store, cx| {
        let prediction = ep_store
            .prediction_at(&buffer, None, &project, cx)
            .expect("should have prediction");
        // Resolve the anchored edits to concrete offsets for comparison.
        let edits: Vec<_> = prediction
            .edits
            .iter()
            .map(|(range, text)| {
                let snapshot = buffer.read(cx).snapshot();
                (range.to_offset(&snapshot), text.clone())
            })
            .collect();
        assert_eq!(edits, vec![(5..5, " world".into())]);
    });
}
2713
/// A model response may echo the `<|user_cursor|>` marker back in its output;
/// the marker must be stripped before the prediction's edits are produced.
#[gpui::test]
async fn test_v3_prediction_strips_cursor_marker_from_edit_text(cx: &mut TestAppContext) {
    let (ep_store, mut requests) = init_test_with_fake_client(cx);
    let fs = FakeFs::new(cx.executor());

    fs.insert_tree(
        "/root",
        json!({
            "foo.txt": "hello"
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let buffer = project
        .update(cx, |project, cx| {
            let path = project
                .find_project_path(path!("root/foo.txt"), cx)
                .unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();

    // Cursor at the end of the single-line buffer.
    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
    let position = snapshot.anchor_before(language::Point::new(0, 5));

    ep_store.update(cx, |ep_store, cx| {
        ep_store.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
    });

    // Respond with output that still contains the cursor marker.
    let (request, respond_tx) = requests.predict.next().await.unwrap();
    let excerpt_length = request.input.cursor_excerpt.len();
    respond_tx
        .send(PredictEditsV3Response {
            request_id: Uuid::new_v4().to_string(),
            output: "hello<|user_cursor|> world".to_string(),
            editable_range: 0..excerpt_length,
            model_version: None,
        })
        .unwrap();

    cx.run_until_parked();

    // The resulting edit must not contain the marker text.
    ep_store.update(cx, |ep_store, cx| {
        let prediction = ep_store
            .prediction_at(&buffer, None, &project, cx)
            .expect("should have prediction");
        let snapshot = buffer.read(cx).snapshot();
        let edits: Vec<_> = prediction
            .edits
            .iter()
            .map(|(range, text)| (range.to_offset(&snapshot), text.clone()))
            .collect();

        assert_eq!(edits, vec![(5..5, " world".into())]);
    });
}
2772
2773fn init_test(cx: &mut TestAppContext) {
2774 cx.update(|cx| {
2775 let settings_store = SettingsStore::test(cx);
2776 cx.set_global(settings_store);
2777 });
2778}
2779
2780async fn apply_edit_prediction(
2781 buffer_content: &str,
2782 completion_response: &str,
2783 cx: &mut TestAppContext,
2784) -> String {
2785 let fs = project::FakeFs::new(cx.executor());
2786 let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await;
2787 let buffer = cx.new(|cx| Buffer::local(buffer_content, cx));
2788 let (ep_store, response) = make_test_ep_store(&project, cx).await;
2789 *response.lock() = completion_response.to_string();
2790 let edit_prediction = run_edit_prediction(&buffer, &project, &ep_store, cx).await;
2791 buffer.update(cx, |buffer, cx| {
2792 buffer.edit(edit_prediction.edits.iter().cloned(), None, cx)
2793 });
2794 buffer.read_with(cx, |buffer, _| buffer.text())
2795}
2796
2797async fn run_edit_prediction(
2798 buffer: &Entity<Buffer>,
2799 project: &Entity<Project>,
2800 ep_store: &Entity<EditPredictionStore>,
2801 cx: &mut TestAppContext,
2802) -> EditPrediction {
2803 let cursor = buffer.read_with(cx, |buffer, _| buffer.anchor_before(Point::new(1, 0)));
2804 ep_store.update(cx, |ep_store, cx| {
2805 ep_store.register_buffer(buffer, &project, cx)
2806 });
2807 cx.background_executor.run_until_parked();
2808 let prediction_task = ep_store.update(cx, |ep_store, cx| {
2809 ep_store.request_prediction(&project, buffer, cursor, Default::default(), cx)
2810 });
2811 prediction_task.await.unwrap().unwrap().prediction.unwrap()
2812}
2813
/// Creates an `EditPredictionStore` whose fake HTTP endpoint answers every
/// `/predict_edits/v3` request with the string held in the returned
/// `Arc<Mutex<String>>`; tests overwrite that string to control the model
/// output. Also registers license-detection watchers for the project's
/// worktrees so data-collection checks have something to consult.
async fn make_test_ep_store(
    project: &Entity<Project>,
    cx: &mut TestAppContext,
) -> (Entity<EditPredictionStore>, Arc<Mutex<String>>) {
    let default_response = "hello world\n".to_string();
    let completion_response: Arc<Mutex<String>> = Arc::new(Mutex::new(default_response));
    let http_client = FakeHttpClient::create({
        let completion_response = completion_response.clone();
        let mut next_request_id = 0;
        move |req| {
            let completion_response = completion_response.clone();
            let method = req.method().clone();
            let uri = req.uri().path().to_string();
            let mut body = req.into_body();
            async move {
                match (method, uri.as_str()) {
                    // Token refresh: always succeed with a fixed token.
                    (Method::POST, "/client/llm_tokens") => Ok(http_client::Response::builder()
                        .status(200)
                        .body(
                            serde_json::to_string(&CreateLlmTokenResponse {
                                token: LlmToken("the-llm-token".to_string()),
                            })
                            .unwrap()
                            .into(),
                        )
                        .unwrap()),
                    (Method::POST, "/predict_edits/v3") => {
                        // Request bodies are zstd-compressed JSON.
                        let mut buf = Vec::new();
                        body.read_to_end(&mut buf).await.ok();
                        let decompressed = zstd::decode_all(&buf[..]).unwrap();
                        let req: PredictEditsV3Request =
                            serde_json::from_slice(&decompressed).unwrap();

                        // Each response gets a fresh, deterministic request id.
                        next_request_id += 1;
                        Ok(http_client::Response::builder()
                            .status(200)
                            .body(
                                serde_json::to_string(&PredictEditsV3Response {
                                    request_id: format!("request-{next_request_id}"),
                                    editable_range: 0..req.input.cursor_excerpt.len(),
                                    output: completion_response.lock().clone(),
                                    model_version: None,
                                })
                                .unwrap()
                                .into(),
                            )
                            .unwrap())
                    }
                    _ => Ok(http_client::Response::builder()
                        .status(404)
                        .body("Not Found".to_string().into())
                        .unwrap()),
                }
            }
        }
    });

    let client = cx.update(|cx| Client::new(Arc::new(FakeSystemClock::new()), http_client, cx));
    let user_store = cx.update(|cx| cx.new(|cx| client::UserStore::new(client.clone(), cx)));
    cx.update(|cx| {
        RefreshLlmTokenListener::register(client.clone(), user_store.clone(), cx);
    });
    let _server = FakeServer::for_client(42, &client, cx).await;

    let ep_store = cx.new(|cx| {
        let mut ep_store = EditPredictionStore::new(client, project.read(cx).user_store(), cx);
        ep_store.set_edit_prediction_model(EditPredictionModel::Zeta);

        // Install a license watcher per worktree so license checks can run.
        let worktrees = project.read(cx).worktrees(cx).collect::<Vec<_>>();
        for worktree in worktrees {
            let worktree_id = worktree.read(cx).id();
            ep_store
                .get_or_init_project(project, cx)
                .license_detection_watchers
                .entry(worktree_id)
                .or_insert_with(|| Rc::new(LicenseDetectionWatcher::new(&worktree, cx)));
        }

        ep_store
    });

    (ep_store, completion_response)
}
2897
2898fn to_completion_edits(
2899 iterator: impl IntoIterator<Item = (Range<usize>, Arc<str>)>,
2900 buffer: &Entity<Buffer>,
2901 cx: &App,
2902) -> Vec<(Range<Anchor>, Arc<str>)> {
2903 let buffer = buffer.read(cx);
2904 iterator
2905 .into_iter()
2906 .map(|(range, text)| {
2907 (
2908 buffer.anchor_after(range.start)..buffer.anchor_before(range.end),
2909 text,
2910 )
2911 })
2912 .collect()
2913}
2914
2915fn from_completion_edits(
2916 editor_edits: &[(Range<Anchor>, Arc<str>)],
2917 buffer: &Entity<Buffer>,
2918 cx: &App,
2919) -> Vec<(Range<usize>, Arc<str>)> {
2920 let buffer = buffer.read(cx);
2921 editor_edits
2922 .iter()
2923 .map(|(range, text)| {
2924 (
2925 range.start.to_offset(buffer)..range.end.to_offset(buffer),
2926 text.clone(),
2927 )
2928 })
2929 .collect()
2930}
2931
/// With a server that rejects every request (401) and no custom prediction
/// URL configured, requesting a prediction must fail rather than silently
/// produce nothing.
#[gpui::test]
async fn test_unauthenticated_without_custom_url_blocks_prediction_impl(cx: &mut TestAppContext) {
    init_test(cx);

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/project",
        serde_json::json!({
            "main.rs": "fn main() {\n    \n}\n"
        }),
    )
    .await;

    let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await;

    // Every HTTP request — including token refresh — is rejected.
    let http_client = FakeHttpClient::create(|_req| async move {
        Ok(gpui::http_client::Response::builder()
            .status(401)
            .body("Unauthorized".into())
            .unwrap())
    });

    let client =
        cx.update(|cx| client::Client::new(Arc::new(FakeSystemClock::new()), http_client, cx));
    let user_store = cx.update(|cx| cx.new(|cx| client::UserStore::new(client.clone(), cx)));
    cx.update(|cx| {
        RefreshLlmTokenListener::register(client.clone(), user_store.clone(), cx);
    });

    let ep_store = cx.new(|cx| EditPredictionStore::new(client, project.read(cx).user_store(), cx));

    let buffer = project
        .update(cx, |project, cx| {
            let path = project
                .find_project_path(path!("/project/main.rs"), cx)
                .unwrap();
            project.open_buffer(path, cx)
        })
        .await
        .unwrap();

    // Cursor inside the empty function body.
    let cursor = buffer.read_with(cx, |buffer, _| buffer.anchor_before(Point::new(1, 4)));
    ep_store.update(cx, |ep_store, cx| {
        ep_store.register_buffer(&buffer, &project, cx)
    });
    cx.background_executor.run_until_parked();

    let completion_task = ep_store.update(cx, |ep_store, cx| {
        ep_store.set_edit_prediction_model(EditPredictionModel::Zeta);
        ep_store.request_prediction(&project, &buffer, cursor, Default::default(), cx)
    });

    let result = completion_task.await;
    assert!(
        result.is_err(),
        "Without authentication and without custom URL, prediction should fail"
    );
}
2990
/// Diagnostic jump targets must avoid regions near a collaborator's cursor —
/// unless the local cursor is also near that region — and cross-file jumps
/// must skip buffers a collaborator is editing.
#[gpui::test]
async fn test_diagnostic_jump_excludes_collaborator_regions(cx: &mut TestAppContext) {
    // Simulates a remote collaborator placing their cursor at `row` by
    // applying an `UpdateSelections` op from a non-local replica (id 10).
    fn set_collaborator_cursor(buffer: &Entity<Buffer>, row: u32, cx: &mut TestAppContext) {
        let collab_replica = clock::ReplicaId::new(10);
        let anchor = buffer.read_with(cx, |buffer, _| {
            buffer.snapshot().anchor_before(Point::new(row, 0))
        });
        let selections: Arc<[Selection<Anchor>]> = Arc::new([Selection {
            id: 1,
            start: anchor,
            end: anchor,
            reversed: false,
            goal: SelectionGoal::None,
        }]);
        buffer.update(cx, |buffer, cx| {
            buffer.apply_ops(
                [Operation::UpdateSelections {
                    selections,
                    lamport_timestamp: clock::Lamport {
                        replica_id: collab_replica,
                        value: 1,
                    },
                    line_mode: false,
                    cursor_shape: CursorShape::Bar,
                }],
                cx,
            );
        });
    }

    // Publishes an ERROR diagnostic spanning columns 0..5 of each given row
    // in the file at `uri_path`.
    fn publish_diagnostics(
        uri_path: &'static str,
        rows: &[u32],
        project: &Entity<Project>,
        cx: &mut TestAppContext,
    ) {
        let diagnostics: Vec<_> = rows
            .iter()
            .map(|&row| lsp::Diagnostic {
                range: lsp::Range::new(lsp::Position::new(row, 0), lsp::Position::new(row, 5)),
                severity: Some(lsp::DiagnosticSeverity::ERROR),
                message: format!("error at row {row}"),
                ..Default::default()
            })
            .collect();
        project.update(cx, |project, cx| {
            project.lsp_store().update(cx, |lsp_store, cx| {
                lsp_store
                    .update_diagnostics(
                        LanguageServerId(0),
                        lsp::PublishDiagnosticsParams {
                            uri: lsp::Uri::from_file_path(uri_path).expect("invalid uri"),
                            diagnostics,
                            version: None,
                        },
                        None,
                        language::DiagnosticSourceKind::Pushed,
                        &[],
                        cx,
                    )
                    .expect("failed to update diagnostics");
            });
        });
    }

    init_test(cx);

    // 60-line active buffer so rows 3, 25, and 50 are well separated.
    let mut lines = String::new();
    for i in 0..60 {
        lines.push_str(&format!("line {i}\n"));
    }

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "active.txt": lines,
            "collab_file.txt": "error here\nsecond line\n",
            "free_file.txt": "another error\nsecond line\n",
        }),
    )
    .await;
    let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;

    let active_buffer = project
        .update(cx, |project, cx| {
            let path = project
                .find_project_path(path!("/root/active.txt"), cx)
                .expect("active.txt not found");
            project.set_active_path(Some(path.clone()), cx);
            project.open_buffer(path, cx)
        })
        .await
        .expect("failed to open active buffer");

    // Collaborator sits at row 5; diagnostics live at rows 3, 25, and 50.
    set_collaborator_cursor(&active_buffer, 5, cx);

    publish_diagnostics(path!("/root/active.txt"), &[3, 25, 50], &project, cx);

    cx.run_until_parked();

    // Case 1: local cursor at row 25, far from the collaborator.
    let cursor_point = Point::new(25, 0);
    let empty_search_range: Range<Point> = Default::default();

    let snapshot = active_buffer.read_with(cx, |buffer, _| buffer.snapshot());
    let result = EditPredictionStore::next_diagnostic_location(
        active_buffer.clone(),
        &snapshot,
        empty_search_range.clone(),
        cursor_point,
        &project,
        &mut cx.to_async(),
    )
    .await
    .expect("next_diagnostic_location failed");

    let (result_buffer, result_anchor) = result.expect("expected a diagnostic location");
    assert_eq!(result_buffer.entity_id(), active_buffer.entity_id());
    let result_row = result_buffer.read_with(cx, |buffer, _| {
        result_anchor.to_point(&buffer.snapshot()).row
    });
    assert_ne!(
        result_row, 3,
        "row 3 is near collaborator (row 5) but far from local cursor (row 25), should be excluded"
    );
    assert!(
        result_row == 25 || result_row == 50,
        "expected row 25 or 50, got {result_row}"
    );

    // Case 2: local cursor at row 4 — near both row 3 and the collaborator.
    let snapshot_near = active_buffer.read_with(cx, |buffer, _| buffer.snapshot());
    let near_cursor_point = Point::new(4, 0);
    let result_near = EditPredictionStore::next_diagnostic_location(
        active_buffer.clone(),
        &snapshot_near,
        empty_search_range.clone(),
        near_cursor_point,
        &project,
        &mut cx.to_async(),
    )
    .await
    .expect("next_diagnostic_location failed");

    let (_, near_anchor) = result_near.expect("expected a diagnostic location when both are near");
    let near_row =
        active_buffer.read_with(cx, |buffer, _| near_anchor.to_point(&buffer.snapshot()).row);
    assert_eq!(
        near_row, 3,
        "row 3 should be included when local cursor (row 4) is also near the collaborator"
    );

    // Case 3: local cursor at row 50 — its own diagnostic wins.
    let snapshot_far = active_buffer.read_with(cx, |buffer, _| buffer.snapshot());
    let far_cursor_point = Point::new(50, 0);
    let result_far = EditPredictionStore::next_diagnostic_location(
        active_buffer.clone(),
        &snapshot_far,
        empty_search_range.clone(),
        far_cursor_point,
        &project,
        &mut cx.to_async(),
    )
    .await
    .expect("next_diagnostic_location failed");

    let (_, far_anchor) = result_far.expect("expected a diagnostic location");
    let far_row =
        active_buffer.read_with(cx, |buffer, _| far_anchor.to_point(&buffer.snapshot()).row);
    assert_eq!(
        far_row, 50,
        "row 50 is near local cursor (row 50) and far from collaborator, should be picked"
    );

    // Case 4: cross-file jump. Both other files have a diagnostic at row 0,
    // but a collaborator sits in collab_file.txt.
    publish_diagnostics(path!("/root/collab_file.txt"), &[0], &project, cx);
    publish_diagnostics(path!("/root/free_file.txt"), &[0], &project, cx);
    cx.run_until_parked();

    let collab_buffer = project
        .update(cx, |project, cx| {
            let path = project
                .find_project_path(path!("/root/collab_file.txt"), cx)
                .expect("collab_file.txt not found");
            project.open_buffer(path, cx)
        })
        .await
        .expect("failed to open collab buffer");

    set_collaborator_cursor(&collab_buffer, 0, cx);
    cx.run_until_parked();

    // A search range covering the whole active file forces the jump to leave it.
    let no_same_file_search_range = Point::new(0, 0)..Point::new(59, 0);
    let snapshot_cross = active_buffer.read_with(cx, |buffer, _| buffer.snapshot());
    let result_cross = EditPredictionStore::next_diagnostic_location(
        active_buffer.clone(),
        &snapshot_cross,
        no_same_file_search_range,
        Point::new(0, 0),
        &project,
        &mut cx.to_async(),
    )
    .await
    .expect("cross-file next_diagnostic_location failed");

    let (cross_buffer, _) = result_cross.expect("expected a cross-file diagnostic location");
    let cross_path = cross_buffer.read_with(cx, |buffer, cx| {
        buffer
            .file()
            .expect("buffer should have a file")
            .full_path(cx)
    });
    assert_eq!(
        cross_path,
        Path::new(path!("root/free_file.txt")),
        "should skip collab_file.txt (has collaborator) and pick free_file.txt"
    );
}
3206
3207#[gpui::test]
3208async fn test_edit_prediction_settled(cx: &mut TestAppContext) {
3209 let (ep_store, _requests) = init_test_with_fake_client(cx);
3210 let fs = FakeFs::new(cx.executor());
3211
3212 // Buffer with two clearly separated regions:
3213 // Region A = lines 0-9 (offsets 0..50)
3214 // Region B = lines 20-29 (offsets 105..155)
3215 // A big gap in between so edits in one region never overlap the other.
3216 let mut content = String::new();
3217 for i in 0..30 {
3218 content.push_str(&format!("line {i:02}\n"));
3219 }
3220
3221 fs.insert_tree(
3222 "/root",
3223 json!({
3224 "foo.md": content.clone()
3225 }),
3226 )
3227 .await;
3228 let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;
3229
3230 let buffer = project
3231 .update(cx, |project, cx| {
3232 let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
3233 project.open_buffer(path, cx)
3234 })
3235 .await
3236 .unwrap();
3237
3238 type SettledEventRecord = (EditPredictionId, String);
3239 let settled_events: Arc<Mutex<Vec<SettledEventRecord>>> = Arc::new(Mutex::new(Vec::new()));
3240
3241 ep_store.update(cx, |ep_store, cx| {
3242 ep_store.register_buffer(&buffer, &project, cx);
3243
3244 let settled_events = settled_events.clone();
3245 ep_store.settled_event_callback = Some(Box::new(move |id, text| {
3246 settled_events.lock().push((id, text));
3247 }));
3248 });
3249
3250 // --- Phase 1: edit in region A and enqueue prediction A ---
3251
3252 buffer.update(cx, |buffer, cx| {
3253 // Edit at the start of line 0.
3254 buffer.edit(vec![(0..0, "ADDED ")], None, cx);
3255 });
3256 cx.run_until_parked();
3257
3258 let snapshot_a = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
3259 let empty_edits: Arc<[(Range<Anchor>, Arc<str>)]> = Vec::new().into();
3260 let edit_preview_a = buffer
3261 .read_with(cx, |buffer, cx| {
3262 buffer.preview_edits(empty_edits.clone(), cx)
3263 })
3264 .await;
3265
3266 // Region A: first 10 lines of the buffer.
3267 let editable_region_a = 0..snapshot_a.point_to_offset(Point::new(10, 0));
3268
3269 ep_store.update(cx, |ep_store, cx| {
3270 ep_store.enqueue_settled_prediction(
3271 EditPredictionId("prediction-a".into()),
3272 &project,
3273 &buffer,
3274 &snapshot_a,
3275 editable_region_a.clone(),
3276 &edit_preview_a,
3277 None,
3278 Duration::from_secs(0),
3279 cx,
3280 );
3281 });
3282
3283 // --- Phase 2: repeatedly edit in region A to keep it unsettled ---
3284
3285 // Let the worker process the channel message before we start advancing.
3286 cx.run_until_parked();
3287
3288 let mut region_a_edit_offset = 5;
3289 for _ in 0..3 {
3290 // Edit inside region A (not at the boundary) so `last_edit_at` is
3291 // updated before the worker's next wake.
3292 buffer.update(cx, |buffer, cx| {
3293 buffer.edit(
3294 vec![(region_a_edit_offset..region_a_edit_offset, "x")],
3295 None,
3296 cx,
3297 );
3298 });
3299 region_a_edit_offset += 1;
3300 cx.run_until_parked();
3301
3302 cx.executor()
3303 .advance_clock(EDIT_PREDICTION_SETTLED_QUIESCENCE / 2);
3304 cx.run_until_parked();
3305 assert!(
3306 settled_events.lock().is_empty(),
3307 "no settled events should fire while region A is still being edited"
3308 );
3309 }
3310
3311 // Still nothing settled.
3312 assert!(settled_events.lock().is_empty());
3313
3314 // --- Phase 3: edit in distinct region B, enqueue prediction B ---
3315 // Advance a small amount so B's quiescence window starts later than A's,
3316 // but not so much that A settles (A's last edit was at the start of
3317 // iteration 3, and it needs a full Q to settle).
3318 cx.executor()
3319 .advance_clock(EDIT_PREDICTION_SETTLED_QUIESCENCE / 4);
3320 cx.run_until_parked();
3321 assert!(settled_events.lock().is_empty());
3322
3323 let snapshot_b = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
3324 let line_20_offset = snapshot_b.point_to_offset(Point::new(20, 0));
3325
3326 buffer.update(cx, |buffer, cx| {
3327 buffer.edit(vec![(line_20_offset..line_20_offset, "NEW ")], None, cx);
3328 });
3329 cx.run_until_parked();
3330
3331 let snapshot_b2 = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
3332 let edit_preview_b = buffer
3333 .read_with(cx, |buffer, cx| buffer.preview_edits(empty_edits, cx))
3334 .await;
3335 let editable_region_b = line_20_offset..snapshot_b2.point_to_offset(Point::new(25, 0));
3336
3337 ep_store.update(cx, |ep_store, cx| {
3338 ep_store.enqueue_settled_prediction(
3339 EditPredictionId("prediction-b".into()),
3340 &project,
3341 &buffer,
3342 &snapshot_b2,
3343 editable_region_b.clone(),
3344 &edit_preview_b,
3345 None,
3346 Duration::from_secs(0),
3347 cx,
3348 );
3349 });
3350
3351 cx.run_until_parked();
3352 assert!(
3353 settled_events.lock().is_empty(),
3354 "neither prediction should have settled yet"
3355 );
3356
3357 // --- Phase 4: let enough time pass for region A to settle ---
3358 // A's last edit was at T_a (during the last loop iteration). The worker is
3359 // sleeping until T_a + Q. We advance just enough to reach that wake time
3360 // (Q/4 since we already advanced Q/4 in phase 3 on top of the loop's
3361 // 3*Q/2). At that point A has been quiet for Q and settles, but B was
3362 // enqueued only Q/4 ago and stays pending.
3363 cx.executor()
3364 .advance_clock(EDIT_PREDICTION_SETTLED_QUIESCENCE / 4);
3365 cx.run_until_parked();
3366
3367 {
3368 let events = settled_events.lock().clone();
3369 assert_eq!(
3370 events.len(),
3371 1,
3372 "prediction and capture_sample for A should have settled, got: {events:?}"
3373 );
3374 assert_eq!(events[0].0, EditPredictionId("prediction-a".into()));
3375 }
3376
3377 // --- Phase 5: let more time pass for region B to settle ---
3378 // B's last edit was Q/4 before A settled. The worker rescheduled to
3379 // B's last_edit_at + Q, which is 3Q/4 from now.
3380 cx.executor()
3381 .advance_clock(EDIT_PREDICTION_SETTLED_QUIESCENCE * 3 / 4);
3382 cx.run_until_parked();
3383
3384 {
3385 let events = settled_events.lock().clone();
3386 assert_eq!(
3387 events.len(),
3388 2,
3389 "both prediction and capture_sample settled events should be emitted for each request, got: {events:?}"
3390 );
3391 assert_eq!(events[1].0, EditPredictionId("prediction-b".into()));
3392 }
3393}
3394
// Runs once at binary load time (before any test executes), so log output is
// captured even for the earliest test. `#[ctor::ctor]` registers this as a
// pre-main constructor; `zlog::init_test()` presumably installs the
// test-friendly logger backend — confirm against the zlog crate.
#[ctor::ctor]
fn init_logger() {
    zlog::init_test();
}