1use super::*;
2use acp_thread::{
3 AgentConnection, AgentModelGroupName, AgentModelList, PermissionOptions, ThreadStatus,
4 UserMessageId,
5};
6use agent_client_protocol::{self as acp};
7use agent_settings::AgentProfileId;
8use anyhow::Result;
9use client::{Client, RefreshLlmTokenListener, UserStore};
10use collections::IndexMap;
11use context_server::{ContextServer, ContextServerCommand, ContextServerId};
12use feature_flags::FeatureFlagAppExt as _;
13use fs::{FakeFs, Fs};
14use futures::{
15 FutureExt as _, StreamExt,
16 channel::{
17 mpsc::{self, UnboundedReceiver},
18 oneshot,
19 },
20 future::{Fuse, Shared},
21};
22use gpui::{
23 App, AppContext, AsyncApp, Entity, Task, TestAppContext, UpdateGlobal,
24 http_client::FakeHttpClient,
25};
26use indoc::indoc;
27use language_model::{
28 CompletionIntent, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
29 LanguageModelId, LanguageModelProviderName, LanguageModelRegistry, LanguageModelRequest,
30 LanguageModelRequestMessage, LanguageModelToolResult, LanguageModelToolSchemaFormat,
31 LanguageModelToolUse, MessageContent, Role, StopReason, TokenUsage,
32 fake_provider::FakeLanguageModel,
33};
34use pretty_assertions::assert_eq;
35use project::{
36 Project, context_server_store::ContextServerStore, project_settings::ProjectSettings,
37};
38use prompt_store::ProjectContext;
39use reqwest_client::ReqwestClient;
40use schemars::JsonSchema;
41use serde::{Deserialize, Serialize};
42use serde_json::json;
43use settings::{Settings, SettingsStore};
44use std::{
45 path::Path,
46 pin::Pin,
47 rc::Rc,
48 sync::{
49 Arc,
50 atomic::{AtomicBool, AtomicUsize, Ordering},
51 },
52 time::Duration,
53};
54use util::path;
55
56mod edit_file_thread_test;
57mod test_tools;
58use test_tools::*;
59
60pub(crate) fn init_test(cx: &mut TestAppContext) {
61 cx.update(|cx| {
62 let settings_store = SettingsStore::test(cx);
63 cx.set_global(settings_store);
64 });
65}
66
/// In-memory stand-in for a terminal used by the terminal-tool tests.
/// Records whether it was killed or stopped and exposes a controllable
/// exit future.
pub(crate) struct FakeTerminalHandle {
    // Flipped to true by `kill()`.
    killed: Arc<AtomicBool>,
    // Simulates the user manually stopping the terminal; set via `set_stopped_by_user`.
    stopped_by_user: Arc<AtomicBool>,
    // Firing this oneshot resolves `wait_for_exit`; consumed on first use.
    exit_sender: std::cell::RefCell<Option<futures::channel::oneshot::Sender<()>>>,
    // Shared future that resolves with the terminal's exit status.
    wait_for_exit: Shared<Task<acp::TerminalExitStatus>>,
    // Canned output returned by `current_output`.
    output: acp::TerminalOutputResponse,
    // Fixed identifier reported by `id()`.
    id: acp::TerminalId,
}
75
76impl FakeTerminalHandle {
77 pub(crate) fn new_never_exits(cx: &mut App) -> Self {
78 let killed = Arc::new(AtomicBool::new(false));
79 let stopped_by_user = Arc::new(AtomicBool::new(false));
80
81 let (exit_sender, exit_receiver) = futures::channel::oneshot::channel();
82
83 let wait_for_exit = cx
84 .spawn(async move |_cx| {
85 // Wait for the exit signal (sent when kill() is called)
86 let _ = exit_receiver.await;
87 acp::TerminalExitStatus::new()
88 })
89 .shared();
90
91 Self {
92 killed,
93 stopped_by_user,
94 exit_sender: std::cell::RefCell::new(Some(exit_sender)),
95 wait_for_exit,
96 output: acp::TerminalOutputResponse::new("partial output".to_string(), false),
97 id: acp::TerminalId::new("fake_terminal".to_string()),
98 }
99 }
100
101 pub(crate) fn new_with_immediate_exit(cx: &mut App, exit_code: u32) -> Self {
102 let killed = Arc::new(AtomicBool::new(false));
103 let stopped_by_user = Arc::new(AtomicBool::new(false));
104 let (exit_sender, _exit_receiver) = futures::channel::oneshot::channel();
105
106 let wait_for_exit = cx
107 .spawn(async move |_cx| acp::TerminalExitStatus::new().exit_code(exit_code))
108 .shared();
109
110 Self {
111 killed,
112 stopped_by_user,
113 exit_sender: std::cell::RefCell::new(Some(exit_sender)),
114 wait_for_exit,
115 output: acp::TerminalOutputResponse::new("command output".to_string(), false),
116 id: acp::TerminalId::new("fake_terminal".to_string()),
117 }
118 }
119
120 pub(crate) fn was_killed(&self) -> bool {
121 self.killed.load(Ordering::SeqCst)
122 }
123
124 pub(crate) fn set_stopped_by_user(&self, stopped: bool) {
125 self.stopped_by_user.store(stopped, Ordering::SeqCst);
126 }
127
128 pub(crate) fn signal_exit(&self) {
129 if let Some(sender) = self.exit_sender.borrow_mut().take() {
130 let _ = sender.send(());
131 }
132 }
133}
134
impl crate::TerminalHandle for FakeTerminalHandle {
    /// Returns the fixed id this fake terminal was constructed with.
    fn id(&self, _cx: &AsyncApp) -> Result<acp::TerminalId> {
        Ok(self.id.clone())
    }

    /// Returns the canned output snapshot; it never changes over time.
    fn current_output(&self, _cx: &AsyncApp) -> Result<acp::TerminalOutputResponse> {
        Ok(self.output.clone())
    }

    /// Shared future resolving when the terminal "exits" (see the constructors
    /// for when that happens).
    fn wait_for_exit(&self, _cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>> {
        Ok(self.wait_for_exit.clone())
    }

    /// Marks the terminal as killed and unblocks any `wait_for_exit` waiters.
    fn kill(&self, _cx: &AsyncApp) -> Result<()> {
        self.killed.store(true, Ordering::SeqCst);
        self.signal_exit();
        Ok(())
    }

    /// Reports the flag previously set through `set_stopped_by_user`.
    fn was_stopped_by_user(&self, _cx: &AsyncApp) -> Result<bool> {
        Ok(self.stopped_by_user.load(Ordering::SeqCst))
    }
}
158
/// Test double for a subagent: every `send` resolves with the output of a
/// pre-arranged shared task.
struct FakeSubagentHandle {
    // Identity reported via `id()`.
    session_id: acp::SessionId,
    // Shared task whose output is returned from every `send` call.
    send_task: Shared<Task<String>>,
}
163
impl SubagentHandle for FakeSubagentHandle {
    fn id(&self) -> acp::SessionId {
        self.session_id.clone()
    }

    // Not needed by the current tests.
    fn num_entries(&self, _cx: &App) -> usize {
        unimplemented!()
    }

    /// Ignores the message and resolves with the canned `send_task` output.
    fn send(&self, _message: String, cx: &AsyncApp) -> Task<Result<String>> {
        let task = self.send_task.clone();
        cx.background_spawn(async move { Ok(task.await) })
    }
}
178
/// Thread environment whose terminal/subagent creation hands out pre-seeded
/// fake handles and counts terminal creations.
#[derive(Default)]
pub(crate) struct FakeThreadEnvironment {
    // Handle returned by every `create_terminal` call; `None` panics on use.
    terminal_handle: Option<Rc<FakeTerminalHandle>>,
    // Handle returned by every `create_subagent` call; `None` panics on use.
    subagent_handle: Option<Rc<FakeSubagentHandle>>,
    // Counts `create_terminal` invocations for assertions.
    terminal_creations: Arc<AtomicUsize>,
}
185
186impl FakeThreadEnvironment {
187 pub(crate) fn with_terminal(self, terminal_handle: FakeTerminalHandle) -> Self {
188 Self {
189 terminal_handle: Some(terminal_handle.into()),
190 ..self
191 }
192 }
193
194 pub(crate) fn terminal_creation_count(&self) -> usize {
195 self.terminal_creations.load(Ordering::SeqCst)
196 }
197}
198
impl crate::ThreadEnvironment for FakeThreadEnvironment {
    /// Hands out the pre-seeded terminal handle, counting each creation.
    /// Panics if the environment was built without `with_terminal`.
    fn create_terminal(
        &self,
        _command: String,
        _cwd: Option<std::path::PathBuf>,
        _output_byte_limit: Option<u64>,
        _cx: &mut AsyncApp,
    ) -> Task<Result<Rc<dyn crate::TerminalHandle>>> {
        self.terminal_creations.fetch_add(1, Ordering::SeqCst);
        let handle = self
            .terminal_handle
            .clone()
            .expect("Terminal handle not available on FakeThreadEnvironment");
        Task::ready(Ok(handle as Rc<dyn crate::TerminalHandle>))
    }

    /// Hands out the pre-seeded subagent handle.
    /// Panics if no subagent handle was configured.
    fn create_subagent(&self, _label: String, _cx: &mut App) -> Result<Rc<dyn SubagentHandle>> {
        Ok(self
            .subagent_handle
            .clone()
            .expect("Subagent handle not available on FakeThreadEnvironment")
            as Rc<dyn SubagentHandle>)
    }
}
223
/// Environment that creates multiple independent terminal handles for testing concurrent terminals.
struct MultiTerminalEnvironment {
    // Every handle created so far, in creation order.
    handles: std::cell::RefCell<Vec<Rc<FakeTerminalHandle>>>,
}
228
229impl MultiTerminalEnvironment {
230 fn new() -> Self {
231 Self {
232 handles: std::cell::RefCell::new(Vec::new()),
233 }
234 }
235
236 fn handles(&self) -> Vec<Rc<FakeTerminalHandle>> {
237 self.handles.borrow().clone()
238 }
239}
240
impl crate::ThreadEnvironment for MultiTerminalEnvironment {
    /// Creates a fresh never-exiting fake terminal for each call and records
    /// it so tests can inspect every handle independently.
    fn create_terminal(
        &self,
        _command: String,
        _cwd: Option<std::path::PathBuf>,
        _output_byte_limit: Option<u64>,
        cx: &mut AsyncApp,
    ) -> Task<Result<Rc<dyn crate::TerminalHandle>>> {
        let handle = Rc::new(cx.update(|cx| FakeTerminalHandle::new_never_exits(cx)));
        self.handles.borrow_mut().push(handle.clone());
        Task::ready(Ok(handle as Rc<dyn crate::TerminalHandle>))
    }

    // Subagents are not exercised by the multi-terminal tests.
    fn create_subagent(&self, _label: String, _cx: &mut App) -> Result<Rc<dyn SubagentHandle>> {
        unimplemented!()
    }
}
258
/// Overrides the global agent settings so every tool runs without prompting
/// the user for permission.
fn always_allow_tools(cx: &mut TestAppContext) {
    cx.update(|cx| {
        let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
        settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
        agent_settings::AgentSettings::override_global(settings, cx);
    });
}
266
267#[gpui::test]
268async fn test_echo(cx: &mut TestAppContext) {
269 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
270 let fake_model = model.as_fake();
271
272 let events = thread
273 .update(cx, |thread, cx| {
274 thread.send(UserMessageId::new(), ["Testing: Reply with 'Hello'"], cx)
275 })
276 .unwrap();
277 cx.run_until_parked();
278 fake_model.send_last_completion_stream_text_chunk("Hello");
279 fake_model
280 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
281 fake_model.end_last_completion_stream();
282
283 let events = events.collect().await;
284 thread.update(cx, |thread, _cx| {
285 assert_eq!(
286 thread.last_received_or_pending_message().unwrap().role(),
287 Role::Assistant
288 );
289 assert_eq!(
290 thread
291 .last_received_or_pending_message()
292 .unwrap()
293 .to_markdown(),
294 "Hello\n"
295 )
296 });
297 assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
298}
299
/// When a terminal tool call is given a timeout and the command never exits,
/// the tool must kill the terminal handle and still return the partial output.
#[gpui::test]
async fn test_terminal_tool_timeout_kills_handle(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let fs = FakeFs::new(cx.executor());
    let project = Project::test(fs, [], cx).await;

    // A terminal that never exits on its own: the only way the tool task can
    // finish is via the timeout path killing the handle.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::TerminalTool::new(project, environment));
    let (event_stream, mut rx) = crate::ToolCallEventStream::test();

    // Very short timeout so the kill path triggers quickly.
    let task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::TerminalToolInput {
                command: "sleep 1000".to_string(),
                cd: ".".to_string(),
                timeout_ms: Some(5),
            }),
            event_stream,
            cx,
        )
    });

    // The tool should first surface the terminal in a tool-call update.
    let update = rx.expect_update_fields().await;
    assert!(
        update.content.iter().any(|blocks| {
            blocks
                .iter()
                .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
        }),
        "expected tool call update to include terminal content"
    );

    let mut task_future: Pin<Box<Fuse<Task<Result<String, String>>>>> = Box::pin(task.fuse());

    // Poll the task cooperatively (advancing executor timers between polls)
    // until it completes, bailing out after a wall-clock deadline so a
    // regression in the timeout/kill path fails fast instead of hanging.
    let deadline = std::time::Instant::now() + Duration::from_millis(500);
    loop {
        if let Some(result) = task_future.as_mut().now_or_never() {
            let result = result.expect("terminal tool task should complete");

            assert!(
                handle.was_killed(),
                "expected terminal handle to be killed on timeout"
            );
            assert!(
                result.contains("partial output"),
                "expected result to include terminal output, got: {result}"
            );
            return;
        }

        if std::time::Instant::now() >= deadline {
            panic!("timed out waiting for terminal tool task to complete");
        }

        cx.run_until_parked();
        cx.background_executor.timer(Duration::from_millis(1)).await;
    }
}
365
/// Counterpart to the timeout test: with no timeout configured, the terminal
/// handle must not be killed even after some time has passed.
/// NOTE(review): currently `#[ignore]`d — presumably flaky or slow; confirm
/// before relying on it.
#[gpui::test]
#[ignore]
async fn test_terminal_tool_without_timeout_does_not_kill_handle(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let fs = FakeFs::new(cx.executor());
    let project = Project::test(fs, [], cx).await;

    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::TerminalTool::new(project, environment));
    let (event_stream, mut rx) = crate::ToolCallEventStream::test();

    // No timeout: the task is intentionally left running (and dropped at the
    // end of the test).
    let _task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::TerminalToolInput {
                command: "sleep 1000".to_string(),
                cd: ".".to_string(),
                timeout_ms: None,
            }),
            event_stream,
            cx,
        )
    });

    // The terminal should still be surfaced in a tool-call update.
    let update = rx.expect_update_fields().await;
    assert!(
        update.content.iter().any(|blocks| {
            blocks
                .iter()
                .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
        }),
        "expected tool call update to include terminal content"
    );

    // Give any (incorrect) kill path a chance to run before asserting.
    cx.background_executor
        .timer(Duration::from_millis(25))
        .await;

    assert!(
        !handle.was_killed(),
        "did not expect terminal handle to be killed without a timeout"
    );
}
415
416#[gpui::test]
417async fn test_thinking(cx: &mut TestAppContext) {
418 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
419 let fake_model = model.as_fake();
420
421 let events = thread
422 .update(cx, |thread, cx| {
423 thread.send(
424 UserMessageId::new(),
425 [indoc! {"
426 Testing:
427
428 Generate a thinking step where you just think the word 'Think',
429 and have your final answer be 'Hello'
430 "}],
431 cx,
432 )
433 })
434 .unwrap();
435 cx.run_until_parked();
436 fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Thinking {
437 text: "Think".to_string(),
438 signature: None,
439 });
440 fake_model.send_last_completion_stream_text_chunk("Hello");
441 fake_model
442 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
443 fake_model.end_last_completion_stream();
444
445 let events = events.collect().await;
446 thread.update(cx, |thread, _cx| {
447 assert_eq!(
448 thread.last_received_or_pending_message().unwrap().role(),
449 Role::Assistant
450 );
451 assert_eq!(
452 thread
453 .last_received_or_pending_message()
454 .unwrap()
455 .to_markdown(),
456 indoc! {"
457 <think>Think</think>
458 Hello
459 "}
460 )
461 });
462 assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
463}
464
465#[gpui::test]
466async fn test_system_prompt(cx: &mut TestAppContext) {
467 let ThreadTest {
468 model,
469 thread,
470 project_context,
471 ..
472 } = setup(cx, TestModel::Fake).await;
473 let fake_model = model.as_fake();
474
475 project_context.update(cx, |project_context, _cx| {
476 project_context.shell = "test-shell".into()
477 });
478 thread.update(cx, |thread, _| thread.add_tool(EchoTool));
479 thread
480 .update(cx, |thread, cx| {
481 thread.send(UserMessageId::new(), ["abc"], cx)
482 })
483 .unwrap();
484 cx.run_until_parked();
485 let mut pending_completions = fake_model.pending_completions();
486 assert_eq!(
487 pending_completions.len(),
488 1,
489 "unexpected pending completions: {:?}",
490 pending_completions
491 );
492
493 let pending_completion = pending_completions.pop().unwrap();
494 assert_eq!(pending_completion.messages[0].role, Role::System);
495
496 let system_message = &pending_completion.messages[0];
497 let system_prompt = system_message.content[0].to_str().unwrap();
498 assert!(
499 system_prompt.contains("test-shell"),
500 "unexpected system message: {:?}",
501 system_message
502 );
503 assert!(
504 system_prompt.contains("## Fixing Diagnostics"),
505 "unexpected system message: {:?}",
506 system_message
507 );
508}
509
510#[gpui::test]
511async fn test_system_prompt_without_tools(cx: &mut TestAppContext) {
512 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
513 let fake_model = model.as_fake();
514
515 thread
516 .update(cx, |thread, cx| {
517 thread.send(UserMessageId::new(), ["abc"], cx)
518 })
519 .unwrap();
520 cx.run_until_parked();
521 let mut pending_completions = fake_model.pending_completions();
522 assert_eq!(
523 pending_completions.len(),
524 1,
525 "unexpected pending completions: {:?}",
526 pending_completions
527 );
528
529 let pending_completion = pending_completions.pop().unwrap();
530 assert_eq!(pending_completion.messages[0].role, Role::System);
531
532 let system_message = &pending_completion.messages[0];
533 let system_prompt = system_message.content[0].to_str().unwrap();
534 assert!(
535 !system_prompt.contains("## Tool Use"),
536 "unexpected system message: {:?}",
537 system_message
538 );
539 assert!(
540 !system_prompt.contains("## Fixing Diagnostics"),
541 "unexpected system message: {:?}",
542 system_message
543 );
544}
545
/// Verifies prompt-cache markers: after each turn only the most recent user
/// message (or tool result) is sent with `cache: true`, and every earlier
/// message goes out uncached.
#[gpui::test]
async fn test_prompt_caching(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Send initial user message and verify it's cached
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Message 1"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Index 0 is the system prompt, so compare only the conversation slice.
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        completion.messages[1..],
        vec![LanguageModelRequestMessage {
            role: Role::User,
            content: vec!["Message 1".into()],
            cache: true,
            reasoning_details: None,
        }]
    );
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Text(
        "Response to Message 1".into(),
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Send another user message and verify only the latest is cached
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Message 2"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec!["Response to Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 2".into()],
                cache: true,
                reasoning_details: None,
            }
        ]
    );
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Text(
        "Response to Message 2".into(),
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Simulate a tool call and verify that the latest tool result is cached
    thread.update(cx, |thread, _| thread.add_tool(EchoTool));
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Use the echo tool"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    let tool_use = LanguageModelToolUse {
        id: "tool_1".into(),
        name: EchoTool::NAME.into(),
        raw_input: json!({"text": "test"}).to_string(),
        input: json!({"text": "test"}),
        is_input_complete: true,
        thought_signature: None,
    };
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone()));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // After the tool runs, the next request must end with the tool result as
    // the only cached message.
    let completion = fake_model.pending_completions().pop().unwrap();
    let tool_result = LanguageModelToolResult {
        tool_use_id: "tool_1".into(),
        tool_name: EchoTool::NAME.into(),
        is_error: false,
        content: "test".into(),
        output: Some("test".into()),
    };
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec!["Response to Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 2".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec!["Response to Message 2".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Use the echo tool".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![MessageContent::ToolUse(tool_use)],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::ToolResult(tool_result)],
                cache: true,
                reasoning_details: None,
            }
        ]
    );
}
691
/// End-to-end test (real Sonnet model, only runs with the "e2e" feature):
/// exercises tool calls that finish before and after streaming stops.
#[gpui::test]
#[cfg_attr(not(feature = "e2e"), ignore)]
async fn test_basic_tool_calls(cx: &mut TestAppContext) {
    let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;

    // Test a tool call that's likely to complete *before* streaming stops.
    let events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(EchoTool);
            thread.send(
                UserMessageId::new(),
                ["Now test the echo tool with 'Hello'. Does it work? Say 'Yes' or 'No'."],
                cx,
            )
        })
        .unwrap()
        .collect()
        .await;
    assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);

    // Test a tool calls that's likely to complete *after* streaming stops.
    let events = thread
        .update(cx, |thread, cx| {
            thread.remove_tool(&EchoTool::NAME);
            thread.add_tool(DelayTool);
            thread.send(
                UserMessageId::new(),
                [
                    "Now call the delay tool with 200ms.",
                    "When the timer goes off, then you echo the output of the tool.",
                ],
                cx,
            )
        })
        .unwrap()
        .collect()
        .await;
    assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
    // The delay tool's output ("Ding") must have been echoed back in the
    // assistant's final message text.
    thread.update(cx, |thread, _cx| {
        assert!(
            thread
                .last_received_or_pending_message()
                .unwrap()
                .as_agent_message()
                .unwrap()
                .content
                .iter()
                .any(|content| {
                    if let AgentMessageContent::Text(text) = content {
                        text.contains("Ding")
                    } else {
                        false
                    }
                }),
            "{}",
            thread.to_markdown()
        );
    });
}
751
/// End-to-end test (real Sonnet model, "e2e" feature only): verifies tool-use
/// input streams incrementally — at least one event must observe a tool use
/// whose input is still incomplete.
#[gpui::test]
#[cfg_attr(not(feature = "e2e"), ignore)]
async fn test_streaming_tool_calls(cx: &mut TestAppContext) {
    let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;

    // Test a tool call that's likely to complete *before* streaming stops.
    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(WordListTool);
            thread.send(UserMessageId::new(), ["Test the word_list tool."], cx)
        })
        .unwrap();

    let mut saw_partial_tool_use = false;
    while let Some(event) = events.next().await {
        if let Ok(ThreadEvent::ToolCall(tool_call)) = event {
            thread.update(cx, |thread, _cx| {
                // Look for a tool use in the thread's last message
                let message = thread.last_received_or_pending_message().unwrap();
                let agent_message = message.as_agent_message().unwrap();
                let last_content = agent_message.content.last().unwrap();
                if let AgentMessageContent::ToolUse(last_tool_use) = last_content {
                    assert_eq!(last_tool_use.name.as_ref(), "word_list");
                    if tool_call.status == acp::ToolCallStatus::Pending {
                        // While pending, the input may still be mid-stream:
                        // incomplete and missing the later keys (like "g").
                        if !last_tool_use.is_input_complete
                            && last_tool_use.input.get("g").is_none()
                        {
                            saw_partial_tool_use = true;
                        }
                    } else {
                        // Once past pending, the full input must be present.
                        last_tool_use
                            .input
                            .get("a")
                            .expect("'a' has streamed because input is now complete");
                        last_tool_use
                            .input
                            .get("g")
                            .expect("'g' has streamed because input is now complete");
                    }
                } else {
                    panic!("last content should be a tool use");
                }
            });
        }
    }

    assert!(
        saw_partial_tool_use,
        "should see at least one partially streamed tool use in the history"
    );
}
803
/// Exercises the tool-permission flow: allow-once, deny-once, allow-always
/// (which must suppress the authorization prompt for subsequent calls of the
/// same tool).
#[gpui::test]
async fn test_tool_authorization(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(ToolRequiringPermission);
            thread.send(UserMessageId::new(), ["abc"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    // Two simultaneous tool uses, each of which must request authorization.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_1".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_2".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    let tool_call_auth_1 = next_tool_call_authorization(&mut events).await;
    let tool_call_auth_2 = next_tool_call_authorization(&mut events).await;

    // Approve the first - send "allow" option_id (UI transforms "once" to "allow")
    tool_call_auth_1
        .response
        .send(acp_thread::SelectedPermissionOutcome::new(
            acp::PermissionOptionId::new("allow"),
            acp::PermissionOptionKind::AllowOnce,
        ))
        .unwrap();
    cx.run_until_parked();

    // Reject the second - send "deny" option_id directly since Deny is now a button
    tool_call_auth_2
        .response
        .send(acp_thread::SelectedPermissionOutcome::new(
            acp::PermissionOptionId::new("deny"),
            acp::PermissionOptionKind::RejectOnce,
        ))
        .unwrap();
    cx.run_until_parked();

    // The next request must carry one successful and one error tool result.
    let completion = fake_model.pending_completions().pop().unwrap();
    let message = completion.messages.last().unwrap();
    assert_eq!(
        message.content,
        vec![
            language_model::MessageContent::ToolResult(LanguageModelToolResult {
                tool_use_id: tool_call_auth_1.tool_call.tool_call_id.0.to_string().into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: false,
                content: "Allowed".into(),
                output: Some("Allowed".into())
            }),
            language_model::MessageContent::ToolResult(LanguageModelToolResult {
                tool_use_id: tool_call_auth_2.tool_call.tool_call_id.0.to_string().into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: true,
                content: "Permission to run tool denied by user".into(),
                output: Some("Permission to run tool denied by user".into())
            })
        ]
    );

    // Simulate yet another tool call.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_3".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Respond by always allowing tools - send transformed option_id
    // (UI transforms "always:tool_requiring_permission" to "always_allow:tool_requiring_permission")
    let tool_call_auth_3 = next_tool_call_authorization(&mut events).await;
    tool_call_auth_3
        .response
        .send(acp_thread::SelectedPermissionOutcome::new(
            acp::PermissionOptionId::new("always_allow:tool_requiring_permission"),
            acp::PermissionOptionKind::AllowAlways,
        ))
        .unwrap();
    cx.run_until_parked();
    let completion = fake_model.pending_completions().pop().unwrap();
    let message = completion.messages.last().unwrap();
    assert_eq!(
        message.content,
        vec![language_model::MessageContent::ToolResult(
            LanguageModelToolResult {
                tool_use_id: tool_call_auth_3.tool_call.tool_call_id.0.to_string().into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: false,
                content: "Allowed".into(),
                output: Some("Allowed".into())
            }
        )]
    );

    // Simulate a final tool call, ensuring we don't trigger authorization.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_4".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    let completion = fake_model.pending_completions().pop().unwrap();
    let message = completion.messages.last().unwrap();
    assert_eq!(
        message.content,
        vec![language_model::MessageContent::ToolResult(
            LanguageModelToolResult {
                tool_use_id: "tool_id_4".into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: false,
                content: "Allowed".into(),
                output: Some("Allowed".into())
            }
        )]
    );
}
949
950#[gpui::test]
951async fn test_tool_hallucination(cx: &mut TestAppContext) {
952 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
953 let fake_model = model.as_fake();
954
955 let mut events = thread
956 .update(cx, |thread, cx| {
957 thread.send(UserMessageId::new(), ["abc"], cx)
958 })
959 .unwrap();
960 cx.run_until_parked();
961 fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
962 LanguageModelToolUse {
963 id: "tool_id_1".into(),
964 name: "nonexistent_tool".into(),
965 raw_input: "{}".into(),
966 input: json!({}),
967 is_input_complete: true,
968 thought_signature: None,
969 },
970 ));
971 fake_model.end_last_completion_stream();
972
973 let tool_call = expect_tool_call(&mut events).await;
974 assert_eq!(tool_call.title, "nonexistent_tool");
975 assert_eq!(tool_call.status, acp::ToolCallStatus::Pending);
976 let update = expect_tool_call_update_fields(&mut events).await;
977 assert_eq!(update.fields.status, Some(acp::ToolCallStatus::Failed));
978}
979
980async fn expect_tool_call(events: &mut UnboundedReceiver<Result<ThreadEvent>>) -> acp::ToolCall {
981 let event = events
982 .next()
983 .await
984 .expect("no tool call authorization event received")
985 .unwrap();
986 match event {
987 ThreadEvent::ToolCall(tool_call) => tool_call,
988 event => {
989 panic!("Unexpected event {event:?}");
990 }
991 }
992}
993
994async fn expect_tool_call_update_fields(
995 events: &mut UnboundedReceiver<Result<ThreadEvent>>,
996) -> acp::ToolCallUpdate {
997 let event = events
998 .next()
999 .await
1000 .expect("no tool call authorization event received")
1001 .unwrap();
1002 match event {
1003 ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(update)) => update,
1004 event => {
1005 panic!("Unexpected event {event:?}");
1006 }
1007 }
1008}
1009
1010async fn expect_plan(events: &mut UnboundedReceiver<Result<ThreadEvent>>) -> acp::Plan {
1011 let event = events
1012 .next()
1013 .await
1014 .expect("no plan event received")
1015 .unwrap();
1016 match event {
1017 ThreadEvent::Plan(plan) => plan,
1018 event => {
1019 panic!("Unexpected event {event:?}");
1020 }
1021 }
1022}
1023
1024async fn next_tool_call_authorization(
1025 events: &mut UnboundedReceiver<Result<ThreadEvent>>,
1026) -> ToolCallAuthorization {
1027 loop {
1028 let event = events
1029 .next()
1030 .await
1031 .expect("no tool call authorization event received")
1032 .unwrap();
1033 if let ThreadEvent::ToolCallAuthorization(tool_call_authorization) = event {
1034 let permission_kinds = tool_call_authorization
1035 .options
1036 .first_option_of_kind(acp::PermissionOptionKind::AllowAlways)
1037 .map(|option| option.kind);
1038 let allow_once = tool_call_authorization
1039 .options
1040 .first_option_of_kind(acp::PermissionOptionKind::AllowOnce)
1041 .map(|option| option.kind);
1042
1043 assert_eq!(
1044 permission_kinds,
1045 Some(acp::PermissionOptionKind::AllowAlways)
1046 );
1047 assert_eq!(allow_once, Some(acp::PermissionOptionKind::AllowOnce));
1048 return tool_call_authorization;
1049 }
1050 }
1051}
1052
/// A multi-word terminal command yields three dropdown choices, including an
/// "always for `cargo build` commands" pattern option.
#[test]
fn test_permission_options_terminal_with_pattern() {
    let options = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["cargo build --release".to_string()],
    )
    .build_permission_options();

    let choices = match options {
        PermissionOptions::Dropdown(choices) => choices,
        _ => panic!("Expected dropdown permission options"),
    };

    assert_eq!(choices.len(), 3);
    let labels = choices
        .iter()
        .map(|choice| choice.allow.name.as_ref())
        .collect::<Vec<&str>>();
    assert!(labels.contains(&"Always for terminal"));
    assert!(labels.contains(&"Always for `cargo build` commands"));
    assert!(labels.contains(&"Only this time"));
}
1074
#[test]
fn test_permission_options_terminal_command_with_flag_second_token() {
    // When the second token is a flag ("-la") rather than a subcommand, the
    // pattern-scoped choice should cover the bare executable ("ls") only.
    let built = ToolPermissionContext::new(TerminalTool::NAME, vec!["ls -la".to_string()])
        .build_permission_options();

    let PermissionOptions::Dropdown(choices) = built else {
        panic!("Expected dropdown permission options");
    };

    assert_eq!(choices.len(), 3);
    let mut labels: Vec<&str> = Vec::with_capacity(choices.len());
    for choice in &choices {
        labels.push(choice.allow.name.as_ref());
    }
    assert!(labels.contains(&"Always for terminal"));
    assert!(labels.contains(&"Always for `ls` commands"));
    assert!(labels.contains(&"Only this time"));
}
1094
#[test]
fn test_permission_options_terminal_single_word_command() {
    // A bare single-word command still gets its own command-scoped choice.
    let options = ToolPermissionContext::new(TerminalTool::NAME, vec!["whoami".to_string()])
        .build_permission_options();

    let choices = match options {
        PermissionOptions::Dropdown(choices) => choices,
        _ => panic!("Expected dropdown permission options"),
    };

    assert_eq!(choices.len(), 3);
    let labels: Vec<&str> = choices
        .iter()
        .map(|choice| choice.allow.name.as_ref())
        .collect();
    for expected in [
        "Always for terminal",
        "Always for `whoami` commands",
        "Only this time",
    ] {
        assert!(labels.contains(&expected));
    }
}
1114
#[test]
fn test_permission_options_edit_file_with_path_pattern() {
    // Edit-file permissions should offer a directory-scoped choice derived
    // from the path argument ("src/").
    let built = ToolPermissionContext::new(EditFileTool::NAME, vec!["src/main.rs".to_string()])
        .build_permission_options();

    let choices = match built {
        PermissionOptions::Dropdown(choices) => choices,
        _ => panic!("Expected dropdown permission options"),
    };

    let has_label = |label: &str| {
        choices.iter().any(|choice| {
            let name: &str = choice.allow.name.as_ref();
            name == label
        })
    };
    assert!(has_label("Always for edit file"));
    assert!(has_label("Always for `src/`"));
}
1132
#[test]
fn test_permission_options_fetch_with_domain_pattern() {
    // Fetch permissions should offer a domain-scoped choice extracted from
    // the URL argument ("docs.rs").
    let options =
        ToolPermissionContext::new(FetchTool::NAME, vec!["https://docs.rs/gpui".to_string()])
            .build_permission_options();

    let choices = match options {
        PermissionOptions::Dropdown(choices) => choices,
        _ => panic!("Expected dropdown permission options"),
    };

    let labels: Vec<&str> = choices
        .iter()
        .map(|choice| choice.allow.name.as_ref())
        .collect();
    assert!(labels.contains(&"Always for fetch"));
    assert!(labels.contains(&"Always for `docs.rs`"));
}
1150
#[test]
fn test_permission_options_without_pattern() {
    // A command with no recognizable pattern ("./deploy.sh") gets only the
    // tool-wide and one-time choices — no command-scoped entry at all.
    let context = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["./deploy.sh --production".to_string()],
    );

    let PermissionOptions::Dropdown(choices) = context.build_permission_options() else {
        panic!("Expected dropdown permission options");
    };

    assert_eq!(choices.len(), 2);
    let labels: Vec<&str> = choices
        .iter()
        .map(|choice| choice.allow.name.as_ref())
        .collect();
    assert!(labels.contains(&"Always for terminal"));
    assert!(labels.contains(&"Only this time"));
    // No "Always for `…` commands" entry may be present.
    assert!(labels.iter().all(|label| !label.contains("commands")));
}
1172
#[test]
fn test_permission_options_symlink_target_are_flat_once_only() {
    // Symlink-target authorization is deliberately minimal: a flat pair of
    // one-time allow/deny options, never the "always" dropdown.
    let permission_options =
        ToolPermissionContext::symlink_target(EditFileTool::NAME, vec!["/outside/file.txt".into()])
            .build_permission_options();

    let options = match permission_options {
        PermissionOptions::Flat(options) => options,
        _ => panic!("Expected flat permission options for symlink target authorization"),
    };

    assert_eq!(options.len(), 2);
    let has = |id: &str, kind: acp::PermissionOptionKind| {
        options
            .iter()
            .any(|option| option.option_id.0.as_ref() == id && option.kind == kind)
    };
    assert!(has("allow", acp::PermissionOptionKind::AllowOnce));
    assert!(has("deny", acp::PermissionOptionKind::RejectOnce));
}
1193
#[test]
fn test_permission_option_ids_for_terminal() {
    // Dropdown choices carry stable option IDs: the two "always" choices
    // share the tool-level IDs (differing only in sub-patterns), while the
    // one-time choice uses plain allow/deny IDs.
    let permission_options = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["cargo build --release".to_string()],
    )
    .build_permission_options();

    let choices = match permission_options {
        PermissionOptions::Dropdown(choices) => choices,
        _ => panic!("Expected dropdown permission options"),
    };

    // Expect 3 choices: always-tool, always-pattern, once.
    assert_eq!(choices.len(), 3);

    // First two choices both use the tool-level option IDs.
    for choice in &choices[..2] {
        assert_eq!(choice.allow.option_id.0.as_ref(), "always_allow:terminal");
        assert_eq!(choice.deny.option_id.0.as_ref(), "always_deny:terminal");
    }
    // …but only the pattern-scoped choice carries a sub-pattern regex.
    assert!(choices[0].sub_patterns.is_empty());
    assert_eq!(choices[1].sub_patterns, vec!["^cargo\\s+build(\\s|$)"]);

    // Third choice is the one-time allow/deny.
    assert_eq!(choices[2].allow.option_id.0.as_ref(), "allow");
    assert_eq!(choices[2].deny.option_id.0.as_ref(), "deny");
    assert!(choices[2].sub_patterns.is_empty());
}
1229
#[test]
fn test_permission_options_terminal_pipeline_produces_dropdown_with_patterns() {
    // A pipeline ("a | b") cannot be covered by a single command pattern, so
    // it should produce `DropdownWithPatterns`: coarse choices plus one
    // pattern per pipeline stage.
    let permission_options = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["cargo test 2>&1 | tail".to_string()],
    )
    .build_permission_options();

    let (choices, patterns, tool_name) = match permission_options {
        PermissionOptions::DropdownWithPatterns {
            choices,
            patterns,
            tool_name,
        } => (choices, patterns, tool_name),
        _ => panic!("Expected DropdownWithPatterns permission options for pipeline command"),
    };

    assert_eq!(tool_name, TerminalTool::NAME);

    // Should have "Always for terminal" and "Only this time" choices.
    assert_eq!(choices.len(), 2);
    let labels: Vec<&str> = choices
        .iter()
        .map(|choice| choice.allow.name.as_ref())
        .collect();
    assert!(labels.contains(&"Always for terminal"));
    assert!(labels.contains(&"Only this time"));

    // Should have per-command patterns for "cargo test" and "tail".
    assert_eq!(patterns.len(), 2);
    let pattern_names: Vec<&str> = patterns.iter().map(|cp| cp.display_name.as_str()).collect();
    assert!(pattern_names.contains(&"cargo test"));
    assert!(pattern_names.contains(&"tail"));

    // Verify the regex forms of both patterns.
    let regex_patterns: Vec<&str> = patterns.iter().map(|cp| cp.pattern.as_str()).collect();
    assert!(regex_patterns.contains(&"^cargo\\s+test(\\s|$)"));
    assert!(regex_patterns.contains(&"^tail\\b"));
}
1269
#[test]
fn test_permission_options_terminal_pipeline_with_chaining() {
    // "&&"-chained and piped commands each contribute their own pattern, and
    // npm's subcommands ("install" vs "test") stay distinct.
    let built = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["npm install && npm test | tail".to_string()],
    )
    .build_permission_options();

    let PermissionOptions::DropdownWithPatterns { patterns, .. } = built else {
        panic!("Expected DropdownWithPatterns for chained pipeline command");
    };

    // With subcommand-aware patterns, "npm install" and "npm test" are distinct.
    assert_eq!(patterns.len(), 3);
    for expected in ["npm install", "npm test", "tail"] {
        assert!(
            patterns
                .iter()
                .any(|cp| cp.display_name.as_str() == expected)
        );
    }
}
1289
1290#[gpui::test]
1291#[cfg_attr(not(feature = "e2e"), ignore)]
1292async fn test_concurrent_tool_calls(cx: &mut TestAppContext) {
1293 let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;
1294
1295 // Test concurrent tool calls with different delay times
1296 let events = thread
1297 .update(cx, |thread, cx| {
1298 thread.add_tool(DelayTool);
1299 thread.send(
1300 UserMessageId::new(),
1301 [
1302 "Call the delay tool twice in the same message.",
1303 "Once with 100ms. Once with 300ms.",
1304 "When both timers are complete, describe the outputs.",
1305 ],
1306 cx,
1307 )
1308 })
1309 .unwrap()
1310 .collect()
1311 .await;
1312
1313 let stop_reasons = stop_events(events);
1314 assert_eq!(stop_reasons, vec![acp::StopReason::EndTurn]);
1315
1316 thread.update(cx, |thread, _cx| {
1317 let last_message = thread.last_received_or_pending_message().unwrap();
1318 let agent_message = last_message.as_agent_message().unwrap();
1319 let text = agent_message
1320 .content
1321 .iter()
1322 .filter_map(|content| {
1323 if let AgentMessageContent::Text(text) = content {
1324 Some(text.as_str())
1325 } else {
1326 None
1327 }
1328 })
1329 .collect::<String>();
1330
1331 assert!(text.contains("Ding"));
1332 });
1333}
1334
#[gpui::test]
async fn test_profiles(cx: &mut TestAppContext) {
    // Switching the agent profile should change which of the registered
    // tools are advertised to the model on the next completion request.
    let ThreadTest {
        model, thread, fs, ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Register all three tools up front; each profile enables a subset.
    thread.update(cx, |thread, _cx| {
        thread.add_tool(DelayTool);
        thread.add_tool(EchoTool);
        thread.add_tool(InfiniteTool);
    });

    // Override profiles and wait for settings to be loaded.
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "profiles": {
                    "test-1": {
                        "name": "Test Profile 1",
                        "tools": {
                            EchoTool::NAME: true,
                            DelayTool::NAME: true,
                        }
                    },
                    "test-2": {
                        "name": "Test Profile 2",
                        "tools": {
                            InfiniteTool::NAME: true,
                        }
                    }
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;
    cx.run_until_parked();

    // Test that test-1 profile (default) has echo and delay tools
    thread
        .update(cx, |thread, cx| {
            thread.set_profile(AgentProfileId("test-1".into()), cx);
            thread.send(UserMessageId::new(), ["test"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    let mut pending_completions = fake_model.pending_completions();
    assert_eq!(pending_completions.len(), 1);
    let completion = pending_completions.pop().unwrap();
    let tool_names: Vec<String> = completion
        .tools
        .iter()
        .map(|tool| tool.name.clone())
        .collect();
    // Note: tools are expected in name order (delay before echo), even
    // though EchoTool was listed first in the profile above.
    assert_eq!(tool_names, vec![DelayTool::NAME, EchoTool::NAME]);
    fake_model.end_last_completion_stream();

    // Switch to test-2 profile, and verify that it has only the infinite tool.
    thread
        .update(cx, |thread, cx| {
            thread.set_profile(AgentProfileId("test-2".into()), cx);
            thread.send(UserMessageId::new(), ["test2"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    let mut pending_completions = fake_model.pending_completions();
    assert_eq!(pending_completions.len(), 1);
    let completion = pending_completions.pop().unwrap();
    let tool_names: Vec<String> = completion
        .tools
        .iter()
        .map(|tool| tool.name.clone())
        .collect();
    assert_eq!(tool_names, vec![InfiniteTool::NAME]);
}
1414
#[gpui::test]
async fn test_mcp_tools(cx: &mut TestAppContext) {
    // Verifies that MCP (context server) tools are exposed to the model,
    // that tool calls are routed to the server, and that a name collision
    // with a native tool is resolved by prefixing the server name.
    let ThreadTest {
        model,
        thread,
        context_server_store,
        fs,
        ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Override profiles and wait for settings to be loaded.
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "tool_permissions": { "default": "allow" },
                "profiles": {
                    "test": {
                        "name": "Test Profile",
                        "enable_all_context_servers": true,
                        "tools": {
                            EchoTool::NAME: true,
                        }
                    },
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;
    cx.run_until_parked();
    thread.update(cx, |thread, cx| {
        thread.set_profile(AgentProfileId("test".into()), cx)
    });

    // Register an MCP server exposing a single "echo" tool that reuses the
    // native EchoTool's input schema (and, for now, its name).
    let mut mcp_tool_calls = setup_context_server(
        "test_server",
        vec![context_server::types::Tool {
            name: "echo".into(),
            description: None,
            input_schema: serde_json::to_value(EchoTool::input_schema(
                LanguageModelToolSchemaFormat::JsonSchema,
            ))
            .unwrap(),
            output_schema: None,
            annotations: None,
        }],
        &context_server_store,
        cx,
    );

    let events = thread.update(cx, |thread, cx| {
        thread.send(UserMessageId::new(), ["Hey"], cx).unwrap()
    });
    cx.run_until_parked();

    // Simulate the model calling the MCP tool.
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(tool_names_for_completion(&completion), vec!["echo"]);
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_1".into(),
            name: "echo".into(),
            raw_input: json!({"text": "test"}).to_string(),
            input: json!({"text": "test"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // The MCP server should receive the call and reply with plain text.
    let (tool_call_params, tool_call_response) = mcp_tool_calls.next().await.unwrap();
    assert_eq!(tool_call_params.name, "echo");
    assert_eq!(tool_call_params.arguments, Some(json!({"text": "test"})));
    tool_call_response
        .send(context_server::types::CallToolResponse {
            content: vec![context_server::types::ToolResponseContent::Text {
                text: "test".into(),
            }],
            is_error: None,
            meta: None,
            structured_content: None,
        })
        .unwrap();
    cx.run_until_parked();

    // NOTE(review): `completion` here is still the first request popped
    // above, so this repeats the earlier assertion verbatim rather than
    // inspecting the follow-up request — possibly meant to pop a fresh
    // pending completion first. Confirm intent.
    assert_eq!(tool_names_for_completion(&completion), vec!["echo"]);
    fake_model.send_last_completion_stream_text_chunk("Done!");
    fake_model.end_last_completion_stream();
    events.collect::<Vec<_>>().await;

    // Send again after adding the echo tool, ensuring the name collision is resolved.
    let events = thread.update(cx, |thread, cx| {
        thread.add_tool(EchoTool);
        thread.send(UserMessageId::new(), ["Go"], cx).unwrap()
    });
    cx.run_until_parked();
    // The native tool keeps "echo"; the MCP tool is renamed "test_server_echo".
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        tool_names_for_completion(&completion),
        vec!["echo", "test_server_echo"]
    );
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_2".into(),
            name: "test_server_echo".into(),
            raw_input: json!({"text": "mcp"}).to_string(),
            input: json!({"text": "mcp"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_3".into(),
            name: "echo".into(),
            raw_input: json!({"text": "native"}).to_string(),
            input: json!({"text": "native"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Only the prefixed call reaches the MCP server; the original tool name
    // ("echo") is restored on the wire.
    let (tool_call_params, tool_call_response) = mcp_tool_calls.next().await.unwrap();
    assert_eq!(tool_call_params.name, "echo");
    assert_eq!(tool_call_params.arguments, Some(json!({"text": "mcp"})));
    tool_call_response
        .send(context_server::types::CallToolResponse {
            content: vec![context_server::types::ToolResponseContent::Text { text: "mcp".into() }],
            is_error: None,
            meta: None,
            structured_content: None,
        })
        .unwrap();
    cx.run_until_parked();

    // Ensure the tool results were inserted with the correct names.
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        completion.messages.last().unwrap().content,
        vec![
            MessageContent::ToolResult(LanguageModelToolResult {
                tool_use_id: "tool_3".into(),
                tool_name: "echo".into(),
                is_error: false,
                content: "native".into(),
                output: Some("native".into()),
            },),
            MessageContent::ToolResult(LanguageModelToolResult {
                tool_use_id: "tool_2".into(),
                tool_name: "test_server_echo".into(),
                is_error: false,
                content: "mcp".into(),
                output: Some("mcp".into()),
            },),
        ]
    );
    fake_model.end_last_completion_stream();
    events.collect::<Vec<_>>().await;
}
1580
#[gpui::test]
async fn test_mcp_tool_result_displayed_when_server_disconnected(cx: &mut TestAppContext) {
    // Regression test: a saved MCP tool result must still be replayed —
    // with its raw output and completed status — after the context server
    // that produced it has been stopped (simulating an app restart).
    let ThreadTest {
        model,
        thread,
        context_server_store,
        fs,
        ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Setup settings to allow MCP tools
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "always_allow_tool_actions": true,
                "profiles": {
                    "test": {
                        "name": "Test Profile",
                        "enable_all_context_servers": true,
                        "tools": {}
                    },
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;
    cx.run_until_parked();
    thread.update(cx, |thread, cx| {
        thread.set_profile(AgentProfileId("test".into()), cx)
    });

    // Setup a context server with a tool
    let mut mcp_tool_calls = setup_context_server(
        "github_server",
        vec![context_server::types::Tool {
            name: "issue_read".into(),
            description: Some("Read a GitHub issue".into()),
            input_schema: json!({
                "type": "object",
                "properties": {
                    "issue_url": { "type": "string" }
                }
            }),
            output_schema: None,
            annotations: None,
        }],
        &context_server_store,
        cx,
    );

    // Send a message and have the model call the MCP tool
    let events = thread.update(cx, |thread, cx| {
        thread
            .send(UserMessageId::new(), ["Read issue #47404"], cx)
            .unwrap()
    });
    cx.run_until_parked();

    // Verify the MCP tool is available to the model
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        tool_names_for_completion(&completion),
        vec!["issue_read"],
        "MCP tool should be available"
    );

    // Simulate the model calling the MCP tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_1".into(),
            name: "issue_read".into(),
            raw_input: json!({"issue_url": "https://github.com/zed-industries/zed/issues/47404"})
                .to_string(),
            input: json!({"issue_url": "https://github.com/zed-industries/zed/issues/47404"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // The MCP server receives the tool call and responds with content
    let expected_tool_output = "Issue #47404: Tool call results are cleared upon app close";
    let (tool_call_params, tool_call_response) = mcp_tool_calls.next().await.unwrap();
    assert_eq!(tool_call_params.name, "issue_read");
    tool_call_response
        .send(context_server::types::CallToolResponse {
            content: vec![context_server::types::ToolResponseContent::Text {
                text: expected_tool_output.into(),
            }],
            is_error: None,
            meta: None,
            structured_content: None,
        })
        .unwrap();
    cx.run_until_parked();

    // After tool completes, the model continues with a new completion request
    // that includes the tool results. We need to respond to this.
    let _completion = fake_model.pending_completions().pop().unwrap();
    fake_model.send_last_completion_stream_text_chunk("I found the issue!");
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    fake_model.end_last_completion_stream();
    events.collect::<Vec<_>>().await;

    // Verify the tool result is stored in the thread by checking the markdown output.
    // The tool result is in the first assistant message (not the last one, which is
    // the model's response after the tool completed).
    thread.update(cx, |thread, _cx| {
        let markdown = thread.to_markdown();
        assert!(
            markdown.contains("**Tool Result**: issue_read"),
            "Thread should contain tool result header"
        );
        assert!(
            markdown.contains(expected_tool_output),
            "Thread should contain tool output: {}",
            expected_tool_output
        );
    });

    // Simulate app restart: disconnect the MCP server.
    // After restart, the MCP server won't be connected yet when the thread is replayed.
    context_server_store.update(cx, |store, cx| {
        let _ = store.stop_server(&ContextServerId("github_server".into()), cx);
    });
    cx.run_until_parked();

    // Replay the thread (this is what happens when loading a saved thread)
    let mut replay_events = thread.update(cx, |thread, cx| thread.replay(cx));

    // Scan the replayed stream for the original tool call and for an update
    // carrying its saved raw output.
    let mut found_tool_call = None;
    let mut found_tool_call_update_with_output = None;

    while let Some(event) = replay_events.next().await {
        let event = event.unwrap();
        match &event {
            ThreadEvent::ToolCall(tc) if tc.tool_call_id.to_string() == "tool_1" => {
                found_tool_call = Some(tc.clone());
            }
            ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(update))
                if update.tool_call_id.to_string() == "tool_1" =>
            {
                if update.fields.raw_output.is_some() {
                    found_tool_call_update_with_output = Some(update.clone());
                }
            }
            _ => {}
        }
    }

    // The tool call should be found
    assert!(
        found_tool_call.is_some(),
        "Tool call should be emitted during replay"
    );

    assert!(
        found_tool_call_update_with_output.is_some(),
        "ToolCallUpdate with raw_output should be emitted even when MCP server is disconnected."
    );

    let update = found_tool_call_update_with_output.unwrap();
    assert_eq!(
        update.fields.raw_output,
        Some(expected_tool_output.into()),
        "raw_output should contain the saved tool result"
    );

    // Also verify the status is correct (completed, not failed)
    assert_eq!(
        update.fields.status,
        Some(acp::ToolCallStatus::Completed),
        "Tool call status should reflect the original completion status"
    );
}
1762
#[gpui::test]
async fn test_mcp_tool_truncation(cx: &mut TestAppContext) {
    // Exercises tool-name disambiguation across native tools and several MCP
    // servers: colliding names get a server-derived prefix, and names at or
    // over MAX_TOOL_NAME_LENGTH are clipped/prefixed to fit the limit.
    let ThreadTest {
        model,
        thread,
        context_server_store,
        fs,
        ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Set up a profile with all tools enabled
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "profiles": {
                    "test": {
                        "name": "Test Profile",
                        "enable_all_context_servers": true,
                        "tools": {
                            EchoTool::NAME: true,
                            DelayTool::NAME: true,
                            WordListTool::NAME: true,
                            ToolRequiringPermission::NAME: true,
                            InfiniteTool::NAME: true,
                        }
                    },
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;
    cx.run_until_parked();

    thread.update(cx, |thread, cx| {
        thread.set_profile(AgentProfileId("test".into()), cx);
        thread.add_tool(EchoTool);
        thread.add_tool(DelayTool);
        thread.add_tool(WordListTool);
        thread.add_tool(ToolRequiringPermission);
        thread.add_tool(InfiniteTool);
    });

    // Set up multiple context servers with some overlapping tool names
    let _server1_calls = setup_context_server(
        "xxx",
        vec![
            context_server::types::Tool {
                name: "echo".into(), // Conflicts with native EchoTool
                description: None,
                input_schema: serde_json::to_value(EchoTool::input_schema(
                    LanguageModelToolSchemaFormat::JsonSchema,
                ))
                .unwrap(),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "unique_tool_1".into(),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
        ],
        &context_server_store,
        cx,
    );

    let _server2_calls = setup_context_server(
        "yyy",
        vec![
            context_server::types::Tool {
                name: "echo".into(), // Also conflicts with native EchoTool
                description: None,
                input_schema: serde_json::to_value(EchoTool::input_schema(
                    LanguageModelToolSchemaFormat::JsonSchema,
                ))
                .unwrap(),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "unique_tool_2".into(),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            // One name just under the limit, one exactly at the limit — both
            // also defined by server "zzz" below, forcing disambiguation.
            context_server::types::Tool {
                name: "a".repeat(MAX_TOOL_NAME_LENGTH - 2),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "b".repeat(MAX_TOOL_NAME_LENGTH - 1),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
        ],
        &context_server_store,
        cx,
    );
    let _server3_calls = setup_context_server(
        "zzz",
        vec![
            context_server::types::Tool {
                name: "a".repeat(MAX_TOOL_NAME_LENGTH - 2),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "b".repeat(MAX_TOOL_NAME_LENGTH - 1),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            // Over the limit on its own — must be truncated.
            context_server::types::Tool {
                name: "c".repeat(MAX_TOOL_NAME_LENGTH + 1),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
        ],
        &context_server_store,
        cx,
    );

    // Server with spaces in name - tests snake_case conversion for API compatibility
    let _server4_calls = setup_context_server(
        "Azure DevOps",
        vec![context_server::types::Tool {
            name: "echo".into(), // Also conflicts - will be disambiguated as azure_dev_ops_echo
            description: None,
            input_schema: serde_json::to_value(EchoTool::input_schema(
                LanguageModelToolSchemaFormat::JsonSchema,
            ))
            .unwrap(),
            output_schema: None,
            annotations: None,
        }],
        &context_server_store,
        cx,
    );

    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Go"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    // Expected final names (sorted): colliding `aaa…` names get a one-letter
    // server prefix ("y_"/"z_") to stay within the limit; only a single
    // `bbb…` entry remains (no prefix can fit); the over-long `ccc…` name is
    // clipped to MAX_TOOL_NAME_LENGTH.
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        tool_names_for_completion(&completion),
        vec![
            "azure_dev_ops_echo",
            "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
            "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
            "delay",
            "echo",
            "infinite",
            "tool_requiring_permission",
            "unique_tool_1",
            "unique_tool_2",
            "word_list",
            "xxx_echo",
            "y_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "yyy_echo",
            "z_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
        ]
    );
}
1946
#[gpui::test]
#[cfg_attr(not(feature = "e2e"), ignore)]
async fn test_cancellation(cx: &mut TestAppContext) {
    // End-to-end: cancelling mid-turn must close the event stream with a
    // Cancelled stop reason — even while a tool is still running — and the
    // thread must accept a fresh message afterwards.
    let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(InfiniteTool);
            thread.add_tool(EchoTool);
            thread.send(
                UserMessageId::new(),
                ["Call the echo tool, then call the infinite tool, then explain their output"],
                cx,
            )
        })
        .unwrap();

    // Wait until both tools are called.
    let mut expected_tools = vec!["Echo", "Infinite Tool"];
    let mut echo_id = None;
    let mut echo_completed = false;
    while let Some(event) = events.next().await {
        match event.unwrap() {
            // Tool calls must arrive in the prompted order (echo first).
            ThreadEvent::ToolCall(tool_call) => {
                assert_eq!(tool_call.title, expected_tools.remove(0));
                if tool_call.title == "Echo" {
                    echo_id = Some(tool_call.tool_call_id);
                }
            }
            // Watch for the echo tool reaching Completed status.
            ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
                acp::ToolCallUpdate {
                    tool_call_id,
                    fields:
                        acp::ToolCallUpdateFields {
                            status: Some(acp::ToolCallStatus::Completed),
                            ..
                        },
                    ..
                },
            )) if Some(&tool_call_id) == echo_id.as_ref() => {
                echo_completed = true;
            }
            _ => {}
        }

        // Proceed once echo has completed and the infinite tool has started.
        if expected_tools.is_empty() && echo_completed {
            break;
        }
    }

    // Cancel the current send and ensure that the event stream is closed, even
    // if one of the tools is still running.
    thread.update(cx, |thread, cx| thread.cancel(cx)).await;
    let events = events.collect::<Vec<_>>().await;
    let last_event = events.last();
    assert!(
        matches!(
            last_event,
            Some(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
        ),
        "unexpected event {last_event:?}"
    );

    // Ensure we can still send a new message after cancellation.
    let events = thread
        .update(cx, |thread, cx| {
            thread.send(
                UserMessageId::new(),
                ["Testing: reply with 'Hello' then stop."],
                cx,
            )
        })
        .unwrap()
        .collect::<Vec<_>>()
        .await;
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();
        assert_eq!(
            agent_message.content,
            vec![AgentMessageContent::Text("Hello".to_string())]
        );
    });
    assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
}
2032
#[gpui::test]
async fn test_terminal_tool_cancellation_captures_output(cx: &mut TestAppContext) {
    // Cancelling a running terminal tool must kill the terminal, surface the
    // output produced so far in the tool result (not just a generic
    // "canceled" message), and leave the thread usable.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // A fake terminal whose process never exits, so cancellation is the only
    // way the tool call can end.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(UserMessageId::new(), ["run a command"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Cancel the thread while the terminal is running
    thread.update(cx, |thread, cx| thread.cancel(cx)).detach();

    // Collect remaining events, driving the executor to let cancellation complete
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify the terminal was killed
    assert!(
        handle.was_killed(),
        "expected terminal handle to be killed on cancellation"
    );

    // Verify we got a cancellation stop event
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::Cancelled],
    );

    // Verify the tool result contains the terminal output, not just "Tool canceled by user"
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();

        // Locate the terminal tool use inside the final agent message.
        let tool_use = agent_message
            .content
            .iter()
            .find_map(|content| match content {
                AgentMessageContent::ToolUse(tool_use) => Some(tool_use),
                _ => None,
            })
            .expect("expected tool use in agent message");

        let tool_result = agent_message
            .tool_results
            .get(&tool_use.id)
            .expect("expected tool result");

        let result_text = match &tool_result.content {
            language_model::LanguageModelToolResultContent::Text(text) => text.to_string(),
            _ => panic!("expected text content in tool result"),
        };

        // "partial output" comes from FakeTerminalHandle's output field
        assert!(
            result_text.contains("partial output"),
            "expected tool result to contain terminal output, got: {result_text}"
        );
        // Match the actual format from process_content in terminal_tool.rs
        assert!(
            result_text.contains("The user stopped this command"),
            "expected tool result to indicate user stopped, got: {result_text}"
        );
    });

    // Verify we can send a new message after cancellation
    verify_thread_recovery(&thread, &fake_model, cx).await;
}
2129
#[gpui::test]
async fn test_cancellation_aware_tool_responds_to_cancellation(cx: &mut TestAppContext) {
    // This test verifies that tools which properly handle cancellation via
    // `event_stream.cancelled_by_user()` (like edit_file_tool) respond promptly
    // to cancellation and report that they were cancelled.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // `was_cancelled` is an observer flag the tool sets when it notices cancellation.
    let (tool, was_cancelled) = CancellationAwareTool::new();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(tool);
            thread.send(
                UserMessageId::new(),
                ["call the cancellation aware tool"],
                cx,
            )
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the cancellation-aware tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "cancellation_aware_1".into(),
            name: "cancellation_aware".into(),
            raw_input: r#"{}"#.into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    cx.run_until_parked();

    // Wait for the tool call to be reported. The loop count is a bounded
    // retry budget (scaled with parallelism) so a regression fails loudly
    // instead of hanging the test.
    let mut tool_started = false;
    let deadline = cx.executor().num_cpus() * 100;
    for _ in 0..deadline {
        cx.run_until_parked();

        // Drain every event that's already available without blocking.
        while let Some(Some(event)) = events.next().now_or_never() {
            if let Ok(ThreadEvent::ToolCall(tool_call)) = &event {
                if tool_call.title == "Cancellation Aware Tool" {
                    tool_started = true;
                    break;
                }
            }
        }

        if tool_started {
            break;
        }

        cx.background_executor
            .timer(Duration::from_millis(10))
            .await;
    }
    assert!(tool_started, "expected cancellation aware tool to start");

    // Cancel the thread and wait for it to complete
    let cancel_task = thread.update(cx, |thread, cx| thread.cancel(cx));

    // The cancel task should complete promptly because the tool handles cancellation
    let timeout = cx.background_executor.timer(Duration::from_secs(5));
    futures::select! {
        _ = cancel_task.fuse() => {}
        _ = timeout.fuse() => {
            panic!("cancel task timed out - tool did not respond to cancellation");
        }
    }

    // Verify the tool detected cancellation via its flag
    assert!(
        was_cancelled.load(std::sync::atomic::Ordering::SeqCst),
        "tool should have detected cancellation via event_stream.cancelled_by_user()"
    );

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify we got a cancellation stop event
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::Cancelled],
    );

    // Verify we can send a new message after cancellation
    verify_thread_recovery(&thread, &fake_model, cx).await;
}
2224
2225/// Helper to verify thread can recover after cancellation by sending a simple message.
2226async fn verify_thread_recovery(
2227 thread: &Entity<Thread>,
2228 fake_model: &FakeLanguageModel,
2229 cx: &mut TestAppContext,
2230) {
2231 let events = thread
2232 .update(cx, |thread, cx| {
2233 thread.send(
2234 UserMessageId::new(),
2235 ["Testing: reply with 'Hello' then stop."],
2236 cx,
2237 )
2238 })
2239 .unwrap();
2240 cx.run_until_parked();
2241 fake_model.send_last_completion_stream_text_chunk("Hello");
2242 fake_model
2243 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2244 fake_model.end_last_completion_stream();
2245
2246 let events = events.collect::<Vec<_>>().await;
2247 thread.update(cx, |thread, _cx| {
2248 let message = thread.last_received_or_pending_message().unwrap();
2249 let agent_message = message.as_agent_message().unwrap();
2250 assert_eq!(
2251 agent_message.content,
2252 vec![AgentMessageContent::Text("Hello".to_string())]
2253 );
2254 });
2255 assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
2256}
2257
2258/// Waits for a terminal tool to start by watching for a ToolCallUpdate with terminal content.
2259async fn wait_for_terminal_tool_started(
2260 events: &mut mpsc::UnboundedReceiver<Result<ThreadEvent>>,
2261 cx: &mut TestAppContext,
2262) {
2263 let deadline = cx.executor().num_cpus() * 100; // Scale with available parallelism
2264 for _ in 0..deadline {
2265 cx.run_until_parked();
2266
2267 while let Some(Some(event)) = events.next().now_or_never() {
2268 if let Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2269 update,
2270 ))) = &event
2271 {
2272 if update.fields.content.as_ref().is_some_and(|content| {
2273 content
2274 .iter()
2275 .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
2276 }) {
2277 return;
2278 }
2279 }
2280 }
2281
2282 cx.background_executor
2283 .timer(Duration::from_millis(10))
2284 .await;
2285 }
2286 panic!("terminal tool did not start within the expected time");
2287}
2288
2289/// Collects events until a Stop event is received, driving the executor to completion.
2290async fn collect_events_until_stop(
2291 events: &mut mpsc::UnboundedReceiver<Result<ThreadEvent>>,
2292 cx: &mut TestAppContext,
2293) -> Vec<Result<ThreadEvent>> {
2294 let mut collected = Vec::new();
2295 let deadline = cx.executor().num_cpus() * 200;
2296
2297 for _ in 0..deadline {
2298 cx.executor().advance_clock(Duration::from_millis(10));
2299 cx.run_until_parked();
2300
2301 while let Some(Some(event)) = events.next().now_or_never() {
2302 let is_stop = matches!(&event, Ok(ThreadEvent::Stop(_)));
2303 collected.push(event);
2304 if is_stop {
2305 return collected;
2306 }
2307 }
2308 }
2309 panic!(
2310 "did not receive Stop event within the expected time; collected {} events",
2311 collected.len()
2312 );
2313}
2314
#[gpui::test]
async fn test_truncate_while_terminal_tool_running(cx: &mut TestAppContext) {
    // Truncating the thread at (or before) the message that spawned a running
    // terminal tool must kill the terminal and leave the thread empty, and the
    // thread must remain usable afterwards.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // A terminal that never exits on its own, so the tool stays running until killed.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    // Keep the message id so we can truncate back to this exact message later.
    let message_id = UserMessageId::new();
    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(message_id.clone(), ["run a command"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Truncate the thread while the terminal is running
    thread
        .update(cx, |thread, cx| thread.truncate(message_id, cx))
        .unwrap();

    // Drive the executor to let cancellation complete
    let _ = collect_events_until_stop(&mut events, cx).await;

    // Verify the terminal was killed
    assert!(
        handle.was_killed(),
        "expected terminal handle to be killed on truncate"
    );

    // Verify the thread is empty after truncation
    thread.update(cx, |thread, _cx| {
        assert_eq!(
            thread.to_markdown(),
            "",
            "expected thread to be empty after truncating the only message"
        );
    });

    // Verify we can send a new message after truncation
    verify_thread_recovery(&thread, &fake_model, cx).await;
}
2381
#[gpui::test]
async fn test_cancel_multiple_concurrent_terminal_tools(cx: &mut TestAppContext) {
    // Tests that cancellation properly kills all running terminal tools when multiple are active.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // Environment that hands out a fresh terminal handle per tool invocation,
    // recording each so we can inspect them after cancellation.
    let environment = Rc::new(MultiTerminalEnvironment::new());

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment.clone(),
            ));
            thread.send(UserMessageId::new(), ["run multiple commands"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling two terminal tools
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_2".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 2000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 2000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for both terminal tools to start by counting terminal content updates.
    // Bounded polling loop: the inner `break` exits the drain loop, the outer
    // check exits the retry loop once both terminals have reported content.
    let mut terminals_started = 0;
    let deadline = cx.executor().num_cpus() * 100;
    for _ in 0..deadline {
        cx.run_until_parked();

        while let Some(Some(event)) = events.next().now_or_never() {
            if let Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
                update,
            ))) = &event
            {
                if update.fields.content.as_ref().is_some_and(|content| {
                    content
                        .iter()
                        .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
                }) {
                    terminals_started += 1;
                    if terminals_started >= 2 {
                        break;
                    }
                }
            }
        }
        if terminals_started >= 2 {
            break;
        }

        cx.background_executor
            .timer(Duration::from_millis(10))
            .await;
    }
    assert!(
        terminals_started >= 2,
        "expected 2 terminal tools to start, got {terminals_started}"
    );

    // Cancel the thread while both terminals are running
    thread.update(cx, |thread, cx| thread.cancel(cx)).detach();

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify both terminal handles were killed
    let handles = environment.handles();
    assert_eq!(
        handles.len(),
        2,
        "expected 2 terminal handles to be created"
    );
    assert!(
        handles[0].was_killed(),
        "expected first terminal handle to be killed on cancellation"
    );
    assert!(
        handles[1].was_killed(),
        "expected second terminal handle to be killed on cancellation"
    );

    // Verify we got a cancellation stop event
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::Cancelled],
    );
}
2490
#[gpui::test]
async fn test_terminal_tool_stopped_via_terminal_card_button(cx: &mut TestAppContext) {
    // Tests that clicking the stop button on the terminal card (as opposed to the main
    // cancel button) properly reports user stopped via the was_stopped_by_user path.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // A terminal that never exits on its own, so it keeps running until we
    // simulate the card's stop button below.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(UserMessageId::new(), ["run a command"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Simulate user clicking stop on the terminal card itself.
    // This sets the flag and signals exit (simulating what the real UI would do).
    handle.set_stopped_by_user(true);
    handle.killed.store(true, Ordering::SeqCst);
    handle.signal_exit();

    // Wait for the tool to complete
    cx.run_until_parked();

    // The thread continues after tool completion - simulate the model ending its turn
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    fake_model.end_last_completion_stream();

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify we got an EndTurn (not Cancelled, since we didn't cancel the thread)
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::EndTurn],
    );

    // Verify the tool result indicates user stopped
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();

        // Locate the recorded tool use and its result, keyed by tool-use id.
        let tool_use = agent_message
            .content
            .iter()
            .find_map(|content| match content {
                AgentMessageContent::ToolUse(tool_use) => Some(tool_use),
                _ => None,
            })
            .expect("expected tool use in agent message");

        let tool_result = agent_message
            .tool_results
            .get(&tool_use.id)
            .expect("expected tool result");

        let result_text = match &tool_result.content {
            language_model::LanguageModelToolResultContent::Text(text) => text.to_string(),
            _ => panic!("expected text content in tool result"),
        };

        assert!(
            result_text.contains("The user stopped this command"),
            "expected tool result to indicate user stopped, got: {result_text}"
        );
    });
}
2585
#[gpui::test]
async fn test_terminal_tool_timeout_expires(cx: &mut TestAppContext) {
    // Tests that when a timeout is configured and expires, the tool result indicates timeout.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // A terminal that never exits on its own; only the timeout can end it here.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(UserMessageId::new(), ["run a command with timeout"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool with a short timeout
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": ".", "timeout_ms": 100}"#.into(),
            input: json!({"command": "sleep 1000", "cd": ".", "timeout_ms": 100}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Advance clock past the timeout (100ms) so the timeout path fires.
    cx.executor().advance_clock(Duration::from_millis(200));
    cx.run_until_parked();

    // The thread continues after tool completion - simulate the model ending its turn
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    fake_model.end_last_completion_stream();

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify the terminal was killed due to timeout
    assert!(
        handle.was_killed(),
        "expected terminal handle to be killed on timeout"
    );

    // Verify we got an EndTurn (the tool completed, just with timeout)
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::EndTurn],
    );

    // Verify the tool result indicates timeout, not user stopped
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();

        // Locate the recorded tool use and its result, keyed by tool-use id.
        let tool_use = agent_message
            .content
            .iter()
            .find_map(|content| match content {
                AgentMessageContent::ToolUse(tool_use) => Some(tool_use),
                _ => None,
            })
            .expect("expected tool use in agent message");

        let tool_result = agent_message
            .tool_results
            .get(&tool_use.id)
            .expect("expected tool result");

        let result_text = match &tool_result.content {
            language_model::LanguageModelToolResultContent::Text(text) => text.to_string(),
            _ => panic!("expected text content in tool result"),
        };

        assert!(
            result_text.contains("timed out"),
            "expected tool result to indicate timeout, got: {result_text}"
        );
        assert!(
            !result_text.contains("The user stopped"),
            "tool result should not mention user stopped when it timed out, got: {result_text}"
        );
    });
}
2684
2685#[gpui::test]
2686async fn test_in_progress_send_canceled_by_next_send(cx: &mut TestAppContext) {
2687 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
2688 let fake_model = model.as_fake();
2689
2690 let events_1 = thread
2691 .update(cx, |thread, cx| {
2692 thread.send(UserMessageId::new(), ["Hello 1"], cx)
2693 })
2694 .unwrap();
2695 cx.run_until_parked();
2696 fake_model.send_last_completion_stream_text_chunk("Hey 1!");
2697 cx.run_until_parked();
2698
2699 let events_2 = thread
2700 .update(cx, |thread, cx| {
2701 thread.send(UserMessageId::new(), ["Hello 2"], cx)
2702 })
2703 .unwrap();
2704 cx.run_until_parked();
2705 fake_model.send_last_completion_stream_text_chunk("Hey 2!");
2706 fake_model
2707 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2708 fake_model.end_last_completion_stream();
2709
2710 let events_1 = events_1.collect::<Vec<_>>().await;
2711 assert_eq!(stop_events(events_1), vec![acp::StopReason::Cancelled]);
2712 let events_2 = events_2.collect::<Vec<_>>().await;
2713 assert_eq!(stop_events(events_2), vec![acp::StopReason::EndTurn]);
2714}
2715
#[gpui::test]
async fn test_retry_cancelled_promptly_on_new_send(cx: &mut TestAppContext) {
    // Regression test: when a completion fails with a retryable error (e.g. upstream 500),
    // the retry loop waits on a timer. If the user switches models and sends a new message
    // during that delay, the old turn should exit immediately instead of retrying with the
    // stale model.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let model_a = model.as_fake();

    // Start a turn with model_a.
    let events_1 = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    // One pending completion request is outstanding on model_a.
    assert_eq!(model_a.completion_count(), 1);

    // Model returns a retryable upstream 500. The turn enters the retry delay.
    model_a.send_last_completion_stream_error(
        LanguageModelCompletionError::UpstreamProviderError {
            message: "Internal server error".to_string(),
            status: http_client::StatusCode::INTERNAL_SERVER_ERROR,
            retry_after: None,
        },
    );
    model_a.end_last_completion_stream();
    cx.run_until_parked();

    // The old completion was consumed; model_a has no pending requests yet because the
    // retry timer hasn't fired.
    assert_eq!(model_a.completion_count(), 0);

    // Switch to model_b and send a new message. This cancels the old turn.
    let model_b = Arc::new(FakeLanguageModel::with_id_and_thinking(
        "fake", "model-b", "Model B", false,
    ));
    thread.update(cx, |thread, cx| {
        thread.set_model(model_b.clone(), cx);
    });
    let events_2 = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Continue"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // model_b should have received its completion request.
    assert_eq!(model_b.as_fake().completion_count(), 1);

    // Advance the clock well past the retry delay (BASE_RETRY_DELAY = 5s).
    cx.executor().advance_clock(Duration::from_secs(10));
    cx.run_until_parked();

    // model_a must NOT have received another completion request — the cancelled turn
    // should have exited during the retry delay rather than retrying with the old model.
    assert_eq!(
        model_a.completion_count(),
        0,
        "old model should not receive a retry request after cancellation"
    );

    // Complete model_b's turn.
    model_b
        .as_fake()
        .send_last_completion_stream_text_chunk("Done!");
    model_b
        .as_fake()
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    model_b.as_fake().end_last_completion_stream();

    // Old turn reports Cancelled; new turn completes normally.
    let events_1 = events_1.collect::<Vec<_>>().await;
    assert_eq!(stop_events(events_1), vec![acp::StopReason::Cancelled]);

    let events_2 = events_2.collect::<Vec<_>>().await;
    assert_eq!(stop_events(events_2), vec![acp::StopReason::EndTurn]);
}
2793
2794#[gpui::test]
2795async fn test_subsequent_successful_sends_dont_cancel(cx: &mut TestAppContext) {
2796 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
2797 let fake_model = model.as_fake();
2798
2799 let events_1 = thread
2800 .update(cx, |thread, cx| {
2801 thread.send(UserMessageId::new(), ["Hello 1"], cx)
2802 })
2803 .unwrap();
2804 cx.run_until_parked();
2805 fake_model.send_last_completion_stream_text_chunk("Hey 1!");
2806 fake_model
2807 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2808 fake_model.end_last_completion_stream();
2809 let events_1 = events_1.collect::<Vec<_>>().await;
2810
2811 let events_2 = thread
2812 .update(cx, |thread, cx| {
2813 thread.send(UserMessageId::new(), ["Hello 2"], cx)
2814 })
2815 .unwrap();
2816 cx.run_until_parked();
2817 fake_model.send_last_completion_stream_text_chunk("Hey 2!");
2818 fake_model
2819 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2820 fake_model.end_last_completion_stream();
2821 let events_2 = events_2.collect::<Vec<_>>().await;
2822
2823 assert_eq!(stop_events(events_1), vec![acp::StopReason::EndTurn]);
2824 assert_eq!(stop_events(events_2), vec![acp::StopReason::EndTurn]);
2825}
2826
#[gpui::test]
async fn test_refusal(cx: &mut TestAppContext) {
    // When the model refuses (StopReason::Refusal), the thread should roll back
    // everything after (and including) the last user message, leaving it empty
    // here since there was only one exchange.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    // Before any model output, only the user message is present.
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello
            "}
        );
    });

    // Stream a partial assistant response.
    fake_model.send_last_completion_stream_text_chunk("Hey!");
    cx.run_until_parked();
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello

                ## Assistant

                Hey!
            "}
        );
    });

    // If the model refuses to continue, the thread should remove all the messages after the last user message.
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::Refusal));
    let events = events.collect::<Vec<_>>().await;
    assert_eq!(stop_events(events), vec![acp::StopReason::Refusal]);
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.to_markdown(), "");
    });
}
2875
#[gpui::test]
async fn test_truncate_first_message(cx: &mut TestAppContext) {
    // Truncating at the first (only) message should empty the thread and clear
    // token usage, and the thread must accept new messages afterwards with
    // fresh token accounting.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Keep the id so we can truncate back to this message later.
    let message_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_id.clone(), ["Hello"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello
            "}
        );
        // No usage has been reported yet.
        assert_eq!(thread.latest_token_usage(), None);
    });

    // Stream a response plus a usage update.
    fake_model.send_last_completion_stream_text_chunk("Hey!");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 32_000,
            output_tokens: 16_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    cx.run_until_parked();
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello

                ## Assistant

                Hey!
            "}
        );
        assert_eq!(
            thread.latest_token_usage(),
            Some(acp_thread::TokenUsage {
                used_tokens: 32_000 + 16_000,
                max_tokens: 1_000_000,
                max_output_tokens: None,
                input_tokens: 32_000,
                output_tokens: 16_000,
            })
        );
    });

    // Truncate at the first message: thread and token usage are both reset.
    thread
        .update(cx, |thread, cx| thread.truncate(message_id, cx))
        .unwrap();
    cx.run_until_parked();
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.to_markdown(), "");
        assert_eq!(thread.latest_token_usage(), None);
    });

    // Ensure we can still send a new message after truncation.
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hi"], cx)
        })
        .unwrap();
    thread.update(cx, |thread, _cx| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hi
            "}
        );
    });
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Ahoy!");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 40_000,
            output_tokens: 20_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    cx.run_until_parked();
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hi

                ## Assistant

                Ahoy!
            "}
        );

        // Usage reflects only the post-truncation turn.
        assert_eq!(
            thread.latest_token_usage(),
            Some(acp_thread::TokenUsage {
                used_tokens: 40_000 + 20_000,
                max_tokens: 1_000_000,
                max_output_tokens: None,
                input_tokens: 40_000,
                output_tokens: 20_000,
            })
        );
    });
}
2997
#[gpui::test]
async fn test_truncate_second_message(cx: &mut TestAppContext) {
    // Truncating at the second message should restore the thread (markdown and
    // token usage) to exactly the state it was in after the first exchange.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // First exchange: message 1 plus a full response with usage.
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Message 1"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Message 1 response");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 32_000,
            output_tokens: 16_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Reusable snapshot assertion: the expected state after only the first
    // exchange, checked both before message 2 and again after truncation.
    let assert_first_message_state = |cx: &mut TestAppContext| {
        thread.clone().read_with(cx, |thread, _| {
            assert_eq!(
                thread.to_markdown(),
                indoc! {"
                    ## User

                    Message 1

                    ## Assistant

                    Message 1 response
                "}
            );

            assert_eq!(
                thread.latest_token_usage(),
                Some(acp_thread::TokenUsage {
                    used_tokens: 32_000 + 16_000,
                    max_tokens: 1_000_000,
                    max_output_tokens: None,
                    input_tokens: 32_000,
                    output_tokens: 16_000,
                })
            );
        });
    };

    assert_first_message_state(cx);

    // Second exchange: keep the id so we can truncate back to it.
    let second_message_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(second_message_id.clone(), ["Message 2"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    fake_model.send_last_completion_stream_text_chunk("Message 2 response");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 40_000,
            output_tokens: 20_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Message 1

                ## Assistant

                Message 1 response

                ## User

                Message 2

                ## Assistant

                Message 2 response
            "}
        );

        assert_eq!(
            thread.latest_token_usage(),
            Some(acp_thread::TokenUsage {
                used_tokens: 40_000 + 20_000,
                max_tokens: 1_000_000,
                max_output_tokens: None,
                input_tokens: 40_000,
                output_tokens: 20_000,
            })
        );
    });

    // Truncate at message 2: state rolls back to the first-exchange snapshot.
    thread
        .update(cx, |thread, cx| thread.truncate(second_message_id, cx))
        .unwrap();
    cx.run_until_parked();

    assert_first_message_state(cx);
}
3112
/// Verifies thread title generation: the summarization model is invoked after
/// the first exchange, the title is cut at the first newline of the summary
/// stream, and no further title generation occurs for later messages.
#[gpui::test]
async fn test_title_generation(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // A separate fake model handles summarization, so the chat stream and the
    // title stream can be driven independently.
    let summary_model = Arc::new(FakeLanguageModel::default());
    thread.update(cx, |thread, cx| {
        thread.set_summarization_model(Some(summary_model.clone()), cx)
    });

    let send = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    fake_model.send_last_completion_stream_text_chunk("Hey!");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    // No title yet: the summarization stream has not produced any output.
    thread.read_with(cx, |thread, _| assert_eq!(thread.title(), None));

    // Ensure the summary model has been invoked to generate a title.
    summary_model.send_last_completion_stream_text_chunk("Hello ");
    summary_model.send_last_completion_stream_text_chunk("world\nG");
    summary_model.send_last_completion_stream_text_chunk("oodnight Moon");
    summary_model.end_last_completion_stream();
    send.collect::<Vec<_>>().await;
    cx.run_until_parked();
    // Only the text before the first newline becomes the title.
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.title(), Some("Hello world".into()))
    });

    // Send another message, ensuring no title is generated this time.
    let send = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello again"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Hey again!");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    // The summarization model received no new request, and the original
    // title is retained.
    assert_eq!(summary_model.pending_completions(), Vec::new());
    send.collect::<Vec<_>>().await;
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.title(), Some("Hello world".into()))
    });
}
3162
/// Verifies that when a completion request is rebuilt mid-turn, tool calls
/// that are still pending (here: one awaiting user permission) are omitted
/// from the request, while completed tool calls and their results are kept.
#[gpui::test]
async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let _events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(ToolRequiringPermission);
            thread.add_tool(EchoTool);
            thread.send(UserMessageId::new(), ["Hey!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // This tool call will stay pending because the permission prompt is
    // never answered in this test.
    let permission_tool_use = LanguageModelToolUse {
        id: "tool_id_1".into(),
        name: ToolRequiringPermission::NAME.into(),
        raw_input: "{}".into(),
        input: json!({}),
        is_input_complete: true,
        thought_signature: None,
    };
    // This tool call runs to completion and should appear in the request.
    let echo_tool_use = LanguageModelToolUse {
        id: "tool_id_2".into(),
        name: EchoTool::NAME.into(),
        raw_input: json!({"text": "test"}).to_string(),
        input: json!({"text": "test"}),
        is_input_complete: true,
        thought_signature: None,
    };
    fake_model.send_last_completion_stream_text_chunk("Hi!");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        permission_tool_use,
    ));
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        echo_tool_use.clone(),
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Ensure pending tools are skipped when building a request.
    let request = thread
        .read_with(cx, |thread, cx| {
            thread.build_completion_request(CompletionIntent::EditFile, cx)
        })
        .unwrap();
    // messages[0] is the system prompt; only the conversation tail is checked.
    assert_eq!(
        request.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Hey!".into()],
                cache: true,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![
                    MessageContent::Text("Hi!".into()),
                    MessageContent::ToolUse(echo_tool_use.clone())
                ],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::ToolResult(LanguageModelToolResult {
                    tool_use_id: echo_tool_use.id.clone(),
                    tool_name: echo_tool_use.name,
                    is_error: false,
                    content: "test".into(),
                    output: Some("test".into())
                })],
                cache: false,
                reasoning_details: None,
            },
        ],
    );
}
3242
/// End-to-end exercise of `NativeAgentConnection` over the ACP surface:
/// session creation, model listing/selection, prompting, cancellation, and
/// the "Session not found" error after a session is closed.
#[gpui::test]
async fn test_agent_connection(cx: &mut TestAppContext) {
    cx.update(settings::init);
    let templates = Templates::new();

    // Initialize language model system with test provider
    cx.update(|cx| {
        gpui_tokio::init(cx);

        let http_client = FakeHttpClient::with_404_response();
        let clock = Arc::new(clock::FakeSystemClock::new());
        let client = Client::new(clock, http_client, cx);
        let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
        language_model::init(cx);
        RefreshLlmTokenListener::register(client.clone(), user_store.clone(), cx);
        language_models::init(user_store, client.clone(), cx);
        LanguageModelRegistry::test(cx);
    });
    cx.executor().forbid_parking();

    // Create a project for new_thread
    let fake_fs = cx.update(|cx| fs::FakeFs::new(cx.background_executor().clone()));
    fake_fs.insert_tree(path!("/test"), json!({})).await;
    let project = Project::test(fake_fs.clone(), [Path::new("/test")], cx).await;
    let cwd = PathList::new(&[Path::new("/test")]);
    let thread_store = cx.new(|cx| ThreadStore::new(cx));

    // Create agent and connection
    let agent = cx
        .update(|cx| NativeAgent::new(thread_store, templates.clone(), None, fake_fs.clone(), cx));
    let connection = NativeAgentConnection(agent.clone());

    // Create a thread using new_thread
    let connection_rc = Rc::new(connection.clone());
    let acp_thread = cx
        .update(|cx| connection_rc.new_session(project, cwd, cx))
        .await
        .expect("new_thread should succeed");

    // Get the session_id from the AcpThread
    let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());

    // Test model_selector returns Some
    let selector_opt = connection.model_selector(&session_id);
    assert!(
        selector_opt.is_some(),
        "agent should always support ModelSelector"
    );
    let selector = selector_opt.unwrap();

    // Test list_models
    let listed_models = cx
        .update(|cx| selector.list_models(cx))
        .await
        .expect("list_models should succeed");
    let AgentModelList::Grouped(listed_models) = listed_models else {
        panic!("Unexpected model list type");
    };
    assert!(!listed_models.is_empty(), "should have at least one model");
    // The test registry exposes the fake provider/model pair as "fake/fake".
    assert_eq!(
        listed_models[&AgentModelGroupName("Fake".into())][0]
            .id
            .0
            .as_ref(),
        "fake/fake"
    );

    // Test selected_model returns the default
    let model = cx
        .update(|cx| selector.selected_model(cx))
        .await
        .expect("selected_model should succeed");
    let model = cx
        .update(|cx| agent.read(cx).models().model_from_id(&model.id))
        .unwrap();
    let model = model.as_fake();
    assert_eq!(model.id().0, "fake", "should return default model");

    // Drive one prompt round-trip through the fake model.
    let request = acp_thread.update(cx, |thread, cx| thread.send(vec!["abc".into()], cx));
    cx.run_until_parked();
    model.send_last_completion_stream_text_chunk("def");
    cx.run_until_parked();
    acp_thread.read_with(cx, |thread, cx| {
        assert_eq!(
            thread.to_markdown(cx),
            indoc! {"
                ## User

                abc

                ## Assistant

                def

            "}
        )
    });

    // Test cancel
    cx.update(|cx| connection.cancel(&session_id, cx));
    request.await.expect("prompt should fail gracefully");

    // Explicitly close the session and drop the ACP thread.
    cx.update(|cx| Rc::new(connection.clone()).close_session(&session_id, cx))
        .await
        .unwrap();
    drop(acp_thread);
    // Prompting a closed session must surface "Session not found".
    let result = cx
        .update(|cx| {
            connection.prompt(
                Some(acp_thread::UserMessageId::new()),
                acp::PromptRequest::new(session_id.clone(), vec!["ghi".into()]),
                cx,
            )
        })
        .await;
    assert_eq!(
        result.as_ref().unwrap_err().to_string(),
        "Session not found",
        "unexpected result: {:?}",
        result
    );
}
3366
/// Verifies the ACP event sequence for a streamed tool call: an initial
/// `ToolCall` from the partial input, a raw-input update once input is
/// complete, an `InProgress` status, and finally `Completed` with the output.
#[gpui::test]
async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    thread.update(cx, |thread, _cx| thread.add_tool(EchoTool));
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Echo something"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Simulate streaming partial input.
    let input = json!({});
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "1".into(),
            name: EchoTool::NAME.into(),
            raw_input: input.to_string(),
            input,
            is_input_complete: false,
            thought_signature: None,
        },
    ));

    // Input streaming completed
    let input = json!({ "text": "Hello!" });
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "1".into(),
            name: "echo".into(),
            raw_input: input.to_string(),
            input,
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // The initial tool call carries the display title "Echo" plus the raw
    // tool name in its meta.
    let tool_call = expect_tool_call(&mut events).await;
    assert_eq!(
        tool_call,
        acp::ToolCall::new("1", "Echo")
            .raw_input(json!({}))
            .meta(acp::Meta::from_iter([("tool_name".into(), "echo".into())]))
    );
    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "1",
            acp::ToolCallUpdateFields::new()
                .title("Echo")
                .kind(acp::ToolKind::Other)
                .raw_input(json!({ "text": "Hello!"}))
        )
    );
    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "1",
            acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress)
        )
    );
    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "1",
            acp::ToolCallUpdateFields::new()
                .status(acp::ToolCallStatus::Completed)
                .raw_output("Hello!")
        )
    );
}
3445
/// Verifies that `UpdatePlanTool` produces the expected ACP event sequence:
/// a `Think`-kind tool call, an `InProgress` update, a `Plan` event whose
/// entries mirror the tool input, and a `Completed` update.
#[gpui::test]
async fn test_update_plan_tool_updates_thread_events(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    thread.update(cx, |thread, _cx| thread.add_tool(UpdatePlanTool));
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Make a plan"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Plan input covering each of the three entry statuses.
    let input = json!({
        "plan": [
            {
                "step": "Inspect the code",
                "status": "completed",
            },
            {
                "step": "Implement the tool",
                "status": "in_progress"
            },
            {
                "step": "Run tests",
                "status": "pending",
            }
        ]
    });
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "plan_1".into(),
            name: UpdatePlanTool::NAME.into(),
            raw_input: input.to_string(),
            input,
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    let tool_call = expect_tool_call(&mut events).await;
    assert_eq!(
        tool_call,
        acp::ToolCall::new("plan_1", "Update plan")
            .kind(acp::ToolKind::Think)
            .raw_input(json!({
                "plan": [
                    {
                        "step": "Inspect the code",
                        "status": "completed",
                    },
                    {
                        "step": "Implement the tool",
                        "status": "in_progress"
                    },
                    {
                        "step": "Run tests",
                        "status": "pending",
                    }
                ]
            }))
            .meta(acp::Meta::from_iter([(
                "tool_name".into(),
                "update_plan".into()
            )]))
    );

    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "plan_1",
            acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress)
        )
    );

    // Each plan entry keeps its step text and status; priority defaults to
    // Medium.
    let plan = expect_plan(&mut events).await;
    assert_eq!(
        plan,
        acp::Plan::new(vec![
            acp::PlanEntry::new(
                "Inspect the code",
                acp::PlanEntryPriority::Medium,
                acp::PlanEntryStatus::Completed,
            ),
            acp::PlanEntry::new(
                "Implement the tool",
                acp::PlanEntryPriority::Medium,
                acp::PlanEntryStatus::InProgress,
            ),
            acp::PlanEntry::new(
                "Run tests",
                acp::PlanEntryPriority::Medium,
                acp::PlanEntryStatus::Pending,
            ),
        ])
    );

    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "plan_1",
            acp::ToolCallUpdateFields::new()
                .status(acp::ToolCallStatus::Completed)
                .raw_output("Plan updated")
        )
    );
}
3557
/// Verifies that a completion which streams successfully produces zero
/// `Retry` events and the expected final thread transcript.
#[gpui::test]
async fn test_send_no_retry_on_success(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    fake_model.send_last_completion_stream_text_chunk("Hey!");
    fake_model.end_last_completion_stream();

    // Drain the event stream until the turn stops, collecting any retries.
    let mut retry_events = Vec::new();
    while let Some(Ok(event)) = events.next().await {
        match event {
            ThreadEvent::Retry(retry_status) => {
                retry_events.push(retry_status);
            }
            ThreadEvent::Stop(..) => break,
            _ => {}
        }
    }

    assert_eq!(retry_events.len(), 0);
    thread.read_with(cx, |thread, _cx| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello!

                ## Assistant

                Hey!
            "}
        )
    });
}
3600
/// Verifies that a `ServerOverloaded` error mid-stream triggers exactly one
/// automatic retry after `retry_after`, and the transcript records the
/// resumption as a separate assistant message with a `[resume]` marker.
#[gpui::test]
async fn test_send_retry_on_error(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Stream part of a response, then fail with a retryable error.
    fake_model.send_last_completion_stream_text_chunk("Hey,");
    fake_model.send_last_completion_stream_error(LanguageModelCompletionError::ServerOverloaded {
        provider: LanguageModelProviderName::new("Anthropic"),
        retry_after: Some(Duration::from_secs(3)),
    });
    fake_model.end_last_completion_stream();

    // Advancing past `retry_after` lets the retry request go out.
    cx.executor().advance_clock(Duration::from_secs(3));
    cx.run_until_parked();

    fake_model.send_last_completion_stream_text_chunk("there!");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    let mut retry_events = Vec::new();
    while let Some(Ok(event)) = events.next().await {
        match event {
            ThreadEvent::Retry(retry_status) => {
                retry_events.push(retry_status);
            }
            ThreadEvent::Stop(..) => break,
            _ => {}
        }
    }

    assert_eq!(retry_events.len(), 1);
    assert!(matches!(
        retry_events[0],
        acp_thread::RetryStatus { attempt: 1, .. }
    ));
    thread.read_with(cx, |thread, _cx| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello!

                ## Assistant

                Hey,

                [resume]

                ## Assistant

                there!
            "}
        )
    });
}
3664
/// Verifies that when a retryable error arrives after a tool call was
/// streamed, the tool still runs to completion and its result is included in
/// the retried completion request.
#[gpui::test]
async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(EchoTool);
            thread.send(UserMessageId::new(), ["Call the echo tool!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    let tool_use_1 = LanguageModelToolUse {
        id: "tool_1".into(),
        name: EchoTool::NAME.into(),
        raw_input: json!({"text": "test"}).to_string(),
        input: json!({"text": "test"}),
        is_input_complete: true,
        thought_signature: None,
    };
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        tool_use_1.clone(),
    ));
    // Error out the stream right after the tool use was received.
    fake_model.send_last_completion_stream_error(LanguageModelCompletionError::ServerOverloaded {
        provider: LanguageModelProviderName::new("Anthropic"),
        retry_after: Some(Duration::from_secs(3)),
    });
    fake_model.end_last_completion_stream();

    cx.executor().advance_clock(Duration::from_secs(3));
    // The retry request must carry both the tool use and its finished result.
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Call the echo tool!".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![language_model::MessageContent::ToolUse(tool_use_1.clone())],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![language_model::MessageContent::ToolResult(
                    LanguageModelToolResult {
                        tool_use_id: tool_use_1.id.clone(),
                        tool_name: tool_use_1.name.clone(),
                        is_error: false,
                        content: "test".into(),
                        output: Some("test".into())
                    }
                )],
                cache: true,
                reasoning_details: None,
            },
        ]
    );

    fake_model.send_last_completion_stream_text_chunk("Done");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    events.collect::<Vec<_>>().await;
    thread.read_with(cx, |thread, _cx| {
        assert_eq!(
            thread.last_received_or_pending_message(),
            Some(Message::Agent(AgentMessage {
                content: vec![AgentMessageContent::Text("Done".into())],
                tool_results: IndexMap::default(),
                reasoning_details: None,
            }))
        );
    })
}
3744
/// Verifies retry exhaustion: after `MAX_RETRY_ATTEMPTS` retryable failures
/// plus one more, the thread emits one `Retry` event per attempt (with
/// increasing attempt numbers) and then surfaces the final error.
#[gpui::test]
async fn test_send_max_retries_exceeded(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Fail the initial attempt and every retry with a retryable error,
    // advancing the clock past `retry_after` each time.
    for _ in 0..crate::thread::MAX_RETRY_ATTEMPTS + 1 {
        fake_model.send_last_completion_stream_error(
            LanguageModelCompletionError::ServerOverloaded {
                provider: LanguageModelProviderName::new("Anthropic"),
                retry_after: Some(Duration::from_secs(3)),
            },
        );
        fake_model.end_last_completion_stream();
        cx.executor().advance_clock(Duration::from_secs(3));
        cx.run_until_parked();
    }

    let mut errors = Vec::new();
    let mut retry_events = Vec::new();
    while let Some(event) = events.next().await {
        match event {
            Ok(ThreadEvent::Retry(retry_status)) => {
                retry_events.push(retry_status);
            }
            Ok(ThreadEvent::Stop(..)) => break,
            Err(error) => errors.push(error),
            _ => {}
        }
    }

    assert_eq!(
        retry_events.len(),
        crate::thread::MAX_RETRY_ATTEMPTS as usize
    );
    // Attempts are numbered starting at 1.
    for i in 0..crate::thread::MAX_RETRY_ATTEMPTS as usize {
        assert_eq!(retry_events[i].attempt, i + 1);
    }
    assert_eq!(errors.len(), 1);
    let error = errors[0]
        .downcast_ref::<LanguageModelCompletionError>()
        .unwrap();
    assert!(matches!(
        error,
        LanguageModelCompletionError::ServerOverloaded { .. }
    ));
}
3798
/// Regression test: a streaming tool whose input never reaches
/// `is_input_complete = true` must still terminate when the LLM stream errors
/// out, so the turn can retry instead of deadlocking.
#[gpui::test]
async fn test_streaming_tool_completes_when_llm_stream_ends_without_final_input(
    cx: &mut TestAppContext,
) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    thread.update(cx, |thread, _cx| {
        thread.add_tool(StreamingEchoTool::new());
    });

    let _events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Use the streaming_echo tool"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Send a partial tool use (is_input_complete = false), simulating the LLM
    // streaming input for a tool.
    let tool_use = LanguageModelToolUse {
        id: "tool_1".into(),
        name: "streaming_echo".into(),
        raw_input: r#"{"text": "partial"}"#.into(),
        input: json!({"text": "partial"}),
        is_input_complete: false,
        thought_signature: None,
    };
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone()));
    cx.run_until_parked();

    // Send a stream error WITHOUT ever sending is_input_complete = true.
    // Before the fix, this would deadlock: the tool waits for more partials
    // (or cancellation), run_turn_internal waits for the tool, and the sender
    // keeping the channel open lives inside RunningTurn.
    fake_model.send_last_completion_stream_error(
        LanguageModelCompletionError::UpstreamProviderError {
            message: "Internal server error".to_string(),
            status: http_client::StatusCode::INTERNAL_SERVER_ERROR,
            retry_after: None,
        },
    );
    fake_model.end_last_completion_stream();

    // Advance past the retry delay so run_turn_internal retries.
    cx.executor().advance_clock(Duration::from_secs(5));
    cx.run_until_parked();

    // The retry request should contain the streaming tool's error result,
    // proving the tool terminated and its result was forwarded.
    let completion = fake_model
        .pending_completions()
        .pop()
        .expect("No running turn");
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Use the streaming_echo tool".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![language_model::MessageContent::ToolUse(tool_use.clone())],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![language_model::MessageContent::ToolResult(
                    LanguageModelToolResult {
                        tool_use_id: tool_use.id.clone(),
                        tool_name: tool_use.name,
                        is_error: true,
                        content: "Failed to receive tool input: tool input was not fully received"
                            .into(),
                        output: Some(
                            "Failed to receive tool input: tool input was not fully received"
                                .into()
                        ),
                    }
                )],
                cache: true,
                reasoning_details: None,
            },
        ]
    );

    // Finish the retry round so the turn completes cleanly.
    fake_model.send_last_completion_stream_text_chunk("Done");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    thread.read_with(cx, |thread, _cx| {
        assert!(
            thread.is_turn_complete(),
            "Thread should not be stuck; the turn should have completed",
        );
    });
}
3905
/// Verifies that a `ToolUseJsonParseError` for an in-flight streaming tool is
/// forwarded to the running tool, which enriches the error with the partial
/// input it saw, producing exactly one error tool result — not the generic
/// "input was not fully received" orphaned-sender error.
#[gpui::test]
async fn test_streaming_tool_json_parse_error_is_forwarded_to_running_tool(
    cx: &mut TestAppContext,
) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    thread.update(cx, |thread, _cx| {
        thread.add_tool(StreamingJsonErrorContextTool);
    });

    let _events = thread
        .update(cx, |thread, cx| {
            thread.send(
                UserMessageId::new(),
                ["Use the streaming_json_error_context tool"],
                cx,
            )
        })
        .unwrap();
    cx.run_until_parked();

    // Stream a partial tool use whose raw input is truncated JSON.
    let tool_use = LanguageModelToolUse {
        id: "tool_1".into(),
        name: StreamingJsonErrorContextTool::NAME.into(),
        raw_input: r#"{"text": "partial"#.into(),
        input: json!({"text": "partial"}),
        is_input_complete: false,
        thought_signature: None,
    };
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use));
    cx.run_until_parked();

    // The model reports it could never produce valid JSON for this tool use.
    fake_model.send_last_completion_stream_event(
        LanguageModelCompletionEvent::ToolUseJsonParseError {
            id: "tool_1".into(),
            tool_name: StreamingJsonErrorContextTool::NAME.into(),
            raw_input: r#"{"text": "partial"#.into(),
            json_parse_error: "EOF while parsing a string at line 1 column 17".into(),
        },
    );
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::ToolUse));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    cx.executor().advance_clock(Duration::from_secs(5));
    cx.run_until_parked();

    let completion = fake_model
        .pending_completions()
        .pop()
        .expect("No running turn");

    // Collect every tool result for tool_1 across the follow-up request.
    let tool_results: Vec<_> = completion
        .messages
        .iter()
        .flat_map(|message| &message.content)
        .filter_map(|content| match content {
            MessageContent::ToolResult(result)
                if result.tool_use_id == language_model::LanguageModelToolUseId::from("tool_1") =>
            {
                Some(result)
            }
            _ => None,
        })
        .collect();

    assert_eq!(
        tool_results.len(),
        1,
        "Expected exactly 1 tool result for tool_1, got {}: {:#?}",
        tool_results.len(),
        tool_results
    );

    let result = tool_results[0];
    assert!(result.is_error);
    let content_text = match &result.content {
        language_model::LanguageModelToolResultContent::Text(text) => text.to_string(),
        other => panic!("Expected text content, got {:?}", other),
    };
    assert!(
        content_text.contains("Saw partial text 'partial' before invalid JSON"),
        "Expected tool-enriched partial context, got: {content_text}"
    );
    assert!(
        content_text
            .contains("Error parsing input JSON: EOF while parsing a string at line 1 column 17"),
        "Expected forwarded JSON parse error, got: {content_text}"
    );
    assert!(
        !content_text.contains("tool input was not fully received"),
        "Should not contain orphaned sender error, got: {content_text}"
    );

    fake_model.send_last_completion_stream_text_chunk("Done");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    thread.read_with(cx, |thread, _cx| {
        assert!(
            thread.is_turn_complete(),
            "Thread should not be stuck; the turn should have completed",
        );
    });
}
4016
4017/// Filters out the stop events for asserting against in tests
4018fn stop_events(result_events: Vec<Result<ThreadEvent>>) -> Vec<acp::StopReason> {
4019 result_events
4020 .into_iter()
4021 .filter_map(|event| match event.unwrap() {
4022 ThreadEvent::Stop(stop_reason) => Some(stop_reason),
4023 _ => None,
4024 })
4025 .collect()
4026}
4027
/// Handles returned by [`setup`] for driving a `Thread` in tests.
struct ThreadTest {
    /// The language model the thread was created with (fake or real).
    model: Arc<dyn LanguageModel>,
    /// The thread under test.
    thread: Entity<Thread>,
    /// Project context shared with the thread.
    project_context: Entity<ProjectContext>,
    /// Store backing the context-server registry passed to the thread.
    context_server_store: Entity<ContextServerStore>,
    /// The fake filesystem backing settings and the test project.
    fs: Arc<FakeFs>,
}
4035
/// Which language model [`setup`] should use: a real provider model
/// (requires authentication) or the in-process fake.
enum TestModel {
    Sonnet4,
    Fake,
}
4040
4041impl TestModel {
4042 fn id(&self) -> LanguageModelId {
4043 match self {
4044 TestModel::Sonnet4 => LanguageModelId("claude-sonnet-4-latest".into()),
4045 TestModel::Fake => unreachable!(),
4046 }
4047 }
4048}
4049
/// Builds a [`ThreadTest`] fixture: writes a settings file enabling every
/// test tool under a "test-profile" agent profile, initializes settings (and,
/// for real models, the language-model stack with authentication), creates a
/// test project at `/test`, and constructs the `Thread` with the chosen model.
async fn setup(cx: &mut TestAppContext, model: TestModel) -> ThreadTest {
    cx.executor().allow_parking();

    let fs = FakeFs::new(cx.background_executor.clone());
    fs.create_dir(paths::settings_file().parent().unwrap())
        .await
        .unwrap();
    // Settings grant the test profile access to all tools defined in
    // `test_tools`, so individual tests only need to call `add_tool`.
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "default_profile": "test-profile",
                "profiles": {
                    "test-profile": {
                        "name": "Test Profile",
                        "tools": {
                            EchoTool::NAME: true,
                            DelayTool::NAME: true,
                            WordListTool::NAME: true,
                            ToolRequiringPermission::NAME: true,
                            InfiniteTool::NAME: true,
                            CancellationAwareTool::NAME: true,
                            StreamingEchoTool::NAME: true,
                            StreamingJsonErrorContextTool::NAME: true,
                            StreamingFailingEchoTool::NAME: true,
                            TerminalTool::NAME: true,
                            UpdatePlanTool::NAME: true,
                        }
                    }
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;

    cx.update(|cx| {
        settings::init(cx);

        match model {
            TestModel::Fake => {}
            TestModel::Sonnet4 => {
                // Real-model runs need the full client + language-model stack
                // and a real HTTP client for provider requests.
                gpui_tokio::init(cx);
                let http_client = ReqwestClient::user_agent("agent tests").unwrap();
                cx.set_http_client(Arc::new(http_client));
                let client = Client::production(cx);
                let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
                language_model::init(cx);
                RefreshLlmTokenListener::register(client.clone(), user_store.clone(), cx);
                language_models::init(user_store, client.clone(), cx);
            }
        };

        // Keep the global SettingsStore in sync with the fake settings file.
        watch_settings(fs.clone(), cx);
    });

    let templates = Templates::new();

    fs.insert_tree(path!("/test"), json!({})).await;
    let project = Project::test(fs.clone(), [path!("/test").as_ref()], cx).await;

    let model = cx
        .update(|cx| {
            if let TestModel::Fake = model {
                Task::ready(Arc::new(FakeLanguageModel::default()) as Arc<_>)
            } else {
                // Look up the real model in the registry and authenticate its
                // provider before returning it.
                let model_id = model.id();
                let models = LanguageModelRegistry::read_global(cx);
                let model = models
                    .available_models(cx)
                    .find(|model| model.id() == model_id)
                    .unwrap();

                let provider = models.provider(&model.provider_id()).unwrap();
                let authenticated = provider.authenticate(cx);

                cx.spawn(async move |_cx| {
                    authenticated.await.unwrap();
                    model
                })
            }
        })
        .await;

    let project_context = cx.new(|_cx| ProjectContext::default());
    let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
    let context_server_registry =
        cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
    let thread = cx.new(|cx| {
        Thread::new(
            project,
            project_context.clone(),
            context_server_registry,
            templates,
            Some(model.clone()),
            cx,
        )
    });
    ThreadTest {
        model,
        thread,
        project_context,
        context_server_store,
        fs,
    }
}
4157
/// Runs once before tests (via `ctor`): wires up `env_logger` only when the
/// caller opted in with `RUST_LOG`, keeping default test output quiet.
#[cfg(test)]
#[ctor::ctor]
fn init_logger() {
    let logging_requested = std::env::var("RUST_LOG").is_ok();
    if logging_requested {
        env_logger::init();
    }
}
4165
/// Spawns a background task that watches the settings file on `fs` and
/// applies each new version to the global `SettingsStore`, so tests that
/// rewrite the settings file see the changes take effect.
fn watch_settings(fs: Arc<dyn Fs>, cx: &mut App) {
    let fs = fs.clone();
    cx.spawn({
        async move |cx| {
            let (mut new_settings_content_rx, watcher_task) = settings::watch_config_file(
                cx.background_executor(),
                fs,
                paths::settings_file().clone(),
            );
            // Keep the watcher alive for as long as this task runs.
            let _watcher_task = watcher_task;

            while let Some(new_settings_content) = new_settings_content_rx.next().await {
                cx.update(|cx| {
                    SettingsStore::update_global(cx, |settings, cx| {
                        settings.set_user_settings(&new_settings_content, cx)
                    })
                })
                // Ignore failures (e.g. the app context is gone at shutdown).
                .ok();
            }
        }
    })
    .detach();
}
4189
4190fn tool_names_for_completion(completion: &LanguageModelRequest) -> Vec<String> {
4191 completion
4192 .tools
4193 .iter()
4194 .map(|tool| tool.name.clone())
4195 .collect()
4196}
4197
/// Registers a fake stdio context server named `name` in project settings,
/// starts it in `context_server_store` with a fake transport that advertises
/// `tools`, and returns a receiver that yields each incoming tool call
/// together with a oneshot sender the test uses to supply that call's
/// response.
fn setup_context_server(
    name: &'static str,
    tools: Vec<context_server::types::Tool>,
    context_server_store: &Entity<ContextServerStore>,
    cx: &mut TestAppContext,
) -> mpsc::UnboundedReceiver<(
    context_server::types::CallToolParams,
    oneshot::Sender<context_server::types::CallToolResponse>,
)> {
    // Declare the server in project settings so the store recognizes it as an
    // enabled stdio server. The command itself is never executed because the
    // transport below is faked.
    cx.update(|cx| {
        let mut settings = ProjectSettings::get_global(cx).clone();
        settings.context_servers.insert(
            name.into(),
            project::project_settings::ContextServerSettings::Stdio {
                enabled: true,
                remote: false,
                command: ContextServerCommand {
                    path: "somebinary".into(),
                    args: Vec::new(),
                    env: None,
                    timeout: None,
                },
            },
        );
        ProjectSettings::override_global(settings, cx);
    });

    let (mcp_tool_calls_tx, mcp_tool_calls_rx) = mpsc::unbounded();
    let fake_transport = context_server::test::create_fake_transport(name, cx.executor())
        // Initialize handshake: report tool support so the client lists tools.
        .on_request::<context_server::types::requests::Initialize, _>(move |_params| async move {
            context_server::types::InitializeResponse {
                protocol_version: context_server::types::ProtocolVersion(
                    context_server::types::LATEST_PROTOCOL_VERSION.to_string(),
                ),
                server_info: context_server::types::Implementation {
                    name: name.into(),
                    version: "1.0.0".to_string(),
                },
                capabilities: context_server::types::ServerCapabilities {
                    tools: Some(context_server::types::ToolsCapabilities {
                        list_changed: Some(true),
                    }),
                    ..Default::default()
                },
                meta: None,
            }
        })
        // Serve the caller-provided tool list on every ListTools request.
        .on_request::<context_server::types::requests::ListTools, _>(move |_params| {
            let tools = tools.clone();
            async move {
                context_server::types::ListToolsResponse {
                    tools,
                    next_cursor: None,
                    meta: None,
                }
            }
        })
        // Forward each tool call to the test via the channel and block the
        // response on the oneshot the test completes.
        .on_request::<context_server::types::requests::CallTool, _>(move |params| {
            let mcp_tool_calls_tx = mcp_tool_calls_tx.clone();
            async move {
                let (response_tx, response_rx) = oneshot::channel();
                mcp_tool_calls_tx
                    .unbounded_send((params, response_tx))
                    .unwrap();
                response_rx.await.unwrap()
            }
        });
    context_server_store.update(cx, |store, cx| {
        store.start_server(
            Arc::new(ContextServer::new(
                ContextServerId(name.into()),
                Arc::new(fake_transport),
            )),
            cx,
        );
    });
    // Let the server finish its startup handshake before returning.
    cx.run_until_parked();
    mcp_tool_calls_rx
}
4277
/// Verifies `tokens_before_message`: each user message reports the input
/// token count carried by the *previous* request's `UsageUpdate`, the first
/// message always reports `None`, and earlier messages keep their values as
/// the conversation grows.
#[gpui::test]
async fn test_tokens_before_message(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // First message
    let message_1_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_1_id.clone(), ["First message"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Before any response, tokens_before_message should return None for first message
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.tokens_before_message(&message_1_id),
            None,
            "First message should have no tokens before it"
        );
    });

    // Complete first message with usage
    fake_model.send_last_completion_stream_text_chunk("Response 1");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 100,
            output_tokens: 50,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // First message still has no tokens before it
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.tokens_before_message(&message_1_id),
            None,
            "First message should still have no tokens before it after response"
        );
    });

    // Second message
    let message_2_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_2_id.clone(), ["Second message"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Second message should have first message's input tokens before it
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.tokens_before_message(&message_2_id),
            Some(100),
            "Second message should have 100 tokens before it (from first request)"
        );
    });

    // Complete second message
    fake_model.send_last_completion_stream_text_chunk("Response 2");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 250, // Total for this request (includes previous context)
            output_tokens: 75,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Third message
    let message_3_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_3_id.clone(), ["Third message"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Third message should have second message's input tokens (250) before it
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.tokens_before_message(&message_3_id),
            Some(250),
            "Third message should have 250 tokens before it (from second request)"
        );
        // Second message should still have 100
        assert_eq!(
            thread.tokens_before_message(&message_2_id),
            Some(100),
            "Second message should still have 100 tokens before it"
        );
        // First message still has none
        assert_eq!(
            thread.tokens_before_message(&message_1_id),
            None,
            "First message should still have no tokens before it"
        );
    });
}
4384
/// Verifies that after truncating the thread at a message, that message's
/// token bookkeeping is gone (`tokens_before_message` returns `None` for it),
/// while messages that survive the truncation are unaffected.
#[gpui::test]
async fn test_tokens_before_message_after_truncate(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Set up three messages with responses
    let message_1_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_1_id.clone(), ["Message 1"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Response 1");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 100,
            output_tokens: 50,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    let message_2_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_2_id.clone(), ["Message 2"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Response 2");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 250,
            output_tokens: 75,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Verify initial state
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.tokens_before_message(&message_2_id), Some(100));
    });

    // Truncate at message 2 (removes message 2 and everything after)
    thread
        .update(cx, |thread, cx| thread.truncate(message_2_id.clone(), cx))
        .unwrap();
    cx.run_until_parked();

    // After truncation, message_2_id no longer exists, so lookup should return None
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.tokens_before_message(&message_2_id),
            None,
            "After truncation, message 2 no longer exists"
        );
        // Message 1 still exists but has no tokens before it
        assert_eq!(
            thread.tokens_before_message(&message_1_id),
            None,
            "First message still has no tokens before it"
        );
    });
}
4455
/// Exercises terminal-tool permission rules end to end:
/// 1. a deny pattern blocks a matching command,
/// 2. an allow pattern skips confirmation and overrides a tool-level deny default,
/// 3. a confirm pattern still prompts despite a global allow default, and
/// 4. a tool-specific deny default overrides the global allow default.
#[gpui::test]
async fn test_terminal_tool_permission_rules(cx: &mut TestAppContext) {
    init_test(cx);

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree("/root", json!({})).await;
    let project = Project::test(fs, ["/root".as_ref()], cx).await;

    // Test 1: Deny rule blocks command
    {
        // Terminal never exits: if the deny rule failed, the test would hang
        // instead of returning an error.
        let environment = Rc::new(cx.update(|cx| {
            FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
        }));

        cx.update(|cx| {
            let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
            settings.tool_permissions.tools.insert(
                TerminalTool::NAME.into(),
                agent_settings::ToolRules {
                    default: Some(settings::ToolPermissionMode::Confirm),
                    always_allow: vec![],
                    always_deny: vec![
                        agent_settings::CompiledRegex::new(r"rm\s+-rf", false).unwrap(),
                    ],
                    always_confirm: vec![],
                    invalid_patterns: vec![],
                },
            );
            agent_settings::AgentSettings::override_global(settings, cx);
        });

        #[allow(clippy::arc_with_non_send_sync)]
        let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
        let (event_stream, _rx) = crate::ToolCallEventStream::test();

        let task = cx.update(|cx| {
            tool.run(
                ToolInput::resolved(crate::TerminalToolInput {
                    command: "rm -rf /".to_string(),
                    cd: ".".to_string(),
                    timeout_ms: None,
                }),
                event_stream,
                cx,
            )
        });

        let result = task.await;
        assert!(
            result.is_err(),
            "expected command to be blocked by deny rule"
        );
        let err_msg = result.unwrap_err().to_lowercase();
        assert!(
            err_msg.contains("blocked"),
            "error should mention the command was blocked"
        );
    }

    // Test 2: Allow rule skips confirmation (and overrides default: Deny)
    {
        let environment = Rc::new(cx.update(|cx| {
            FakeThreadEnvironment::default()
                .with_terminal(FakeTerminalHandle::new_with_immediate_exit(cx, 0))
        }));

        cx.update(|cx| {
            let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
            settings.tool_permissions.tools.insert(
                TerminalTool::NAME.into(),
                agent_settings::ToolRules {
                    default: Some(settings::ToolPermissionMode::Deny),
                    always_allow: vec![
                        agent_settings::CompiledRegex::new(r"^echo\s", false).unwrap(),
                    ],
                    always_deny: vec![],
                    always_confirm: vec![],
                    invalid_patterns: vec![],
                },
            );
            agent_settings::AgentSettings::override_global(settings, cx);
        });

        #[allow(clippy::arc_with_non_send_sync)]
        let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
        let (event_stream, mut rx) = crate::ToolCallEventStream::test();

        let task = cx.update(|cx| {
            tool.run(
                ToolInput::resolved(crate::TerminalToolInput {
                    command: "echo hello".to_string(),
                    cd: ".".to_string(),
                    timeout_ms: None,
                }),
                event_stream,
                cx,
            )
        });

        // If the allow rule worked, the tool goes straight to running the
        // terminal (terminal content appears) without asking for authorization.
        let update = rx.expect_update_fields().await;
        assert!(
            update.content.iter().any(|blocks| {
                blocks
                    .iter()
                    .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
            }),
            "expected terminal content (allow rule should skip confirmation and override default deny)"
        );

        let result = task.await;
        assert!(
            result.is_ok(),
            "expected command to succeed without confirmation"
        );
    }

    // Test 3: global default: allow does NOT override always_confirm patterns
    {
        let environment = Rc::new(cx.update(|cx| {
            FakeThreadEnvironment::default()
                .with_terminal(FakeTerminalHandle::new_with_immediate_exit(cx, 0))
        }));

        cx.update(|cx| {
            let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
            settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
            settings.tool_permissions.tools.insert(
                TerminalTool::NAME.into(),
                agent_settings::ToolRules {
                    default: Some(settings::ToolPermissionMode::Allow),
                    always_allow: vec![],
                    always_deny: vec![],
                    always_confirm: vec![
                        agent_settings::CompiledRegex::new(r"sudo", false).unwrap(),
                    ],
                    invalid_patterns: vec![],
                },
            );
            agent_settings::AgentSettings::override_global(settings, cx);
        });

        #[allow(clippy::arc_with_non_send_sync)]
        let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
        let (event_stream, mut rx) = crate::ToolCallEventStream::test();

        let _task = cx.update(|cx| {
            tool.run(
                ToolInput::resolved(crate::TerminalToolInput {
                    command: "sudo rm file".to_string(),
                    cd: ".".to_string(),
                    timeout_ms: None,
                }),
                event_stream,
                cx,
            )
        });

        // With global default: allow, confirm patterns are still respected
        // The expect_authorization() call will panic if no authorization is requested,
        // which validates that the confirm pattern still triggers confirmation
        let _auth = rx.expect_authorization().await;

        drop(_task);
    }

    // Test 4: tool-specific default: deny is respected even with global default: allow
    {
        let environment = Rc::new(cx.update(|cx| {
            FakeThreadEnvironment::default()
                .with_terminal(FakeTerminalHandle::new_with_immediate_exit(cx, 0))
        }));

        cx.update(|cx| {
            let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
            settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
            settings.tool_permissions.tools.insert(
                TerminalTool::NAME.into(),
                agent_settings::ToolRules {
                    default: Some(settings::ToolPermissionMode::Deny),
                    always_allow: vec![],
                    always_deny: vec![],
                    always_confirm: vec![],
                    invalid_patterns: vec![],
                },
            );
            agent_settings::AgentSettings::override_global(settings, cx);
        });

        #[allow(clippy::arc_with_non_send_sync)]
        let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
        let (event_stream, _rx) = crate::ToolCallEventStream::test();

        let task = cx.update(|cx| {
            tool.run(
                ToolInput::resolved(crate::TerminalToolInput {
                    command: "echo hello".to_string(),
                    cd: ".".to_string(),
                    timeout_ms: None,
                }),
                event_stream,
                cx,
            )
        });

        // tool-specific default: deny is respected even with global default: allow
        let result = task.await;
        assert!(
            result.is_err(),
            "expected command to be blocked by tool-specific deny default"
        );
        let err_msg = result.unwrap_err().to_lowercase();
        assert!(
            err_msg.contains("disabled"),
            "error should mention the tool is disabled, got: {err_msg}"
        );
    }
}
4673
/// End-to-end subagent flow: the parent model issues a `SpawnAgentTool` call,
/// the subagent session streams its response, and the parent's tool call
/// completes with the subagent's output embedded in the transcript.
#[gpui::test]
async fn test_subagent_tool_call_end_to_end(cx: &mut TestAppContext) {
    init_test(cx);
    cx.update(|cx| {
        LanguageModelRegistry::test(cx);
    });
    // Subagents are behind a feature flag; enable it for this test.
    cx.update(|cx| {
        cx.update_flags(true, vec!["subagents".to_string()]);
    });

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/",
        json!({
            "a": {
                "b.md": "Lorem"
            }
        }),
    )
    .await;
    let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
    let thread_store = cx.new(|cx| ThreadStore::new(cx));
    let agent = cx.update(|cx| {
        NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
    });
    let connection = Rc::new(NativeAgentConnection(agent.clone()));

    let acp_thread = cx
        .update(|cx| {
            connection
                .clone()
                .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
        })
        .await
        .unwrap();
    let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
    let thread = agent.read_with(cx, |agent, _| {
        agent.sessions.get(&session_id).unwrap().thread.clone()
    });
    let model = Arc::new(FakeLanguageModel::default());

    // Install a fake model so the test can script completions deterministically.
    thread.update(cx, |thread, cx| {
        thread.set_model(model.clone(), cx);
    });
    cx.run_until_parked();

    let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
    cx.run_until_parked();
    // Parent turn: stream some text, then a tool call that spawns a subagent.
    model.send_last_completion_stream_text_chunk("spawning subagent");
    let subagent_tool_input = SpawnAgentToolInput {
        label: "label".to_string(),
        message: "subagent task prompt".to_string(),
        session_id: None,
    };
    let subagent_tool_use = LanguageModelToolUse {
        id: "subagent_1".into(),
        name: SpawnAgentTool::NAME.into(),
        raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
        input: serde_json::to_value(&subagent_tool_input).unwrap(),
        is_input_complete: true,
        thought_signature: None,
    };
    model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        subagent_tool_use,
    ));
    model.end_last_completion_stream();

    cx.run_until_parked();

    // The tool call should have started exactly one running subagent session.
    let subagent_session_id = thread.read_with(cx, |thread, cx| {
        thread
            .running_subagent_ids(cx)
            .get(0)
            .expect("subagent thread should be running")
            .clone()
    });

    let subagent_thread = agent.read_with(cx, |agent, _cx| {
        agent
            .sessions
            .get(&subagent_session_id)
            .expect("subagent session should exist")
            .acp_thread
            .clone()
    });

    // The fake model's "last" stream now belongs to the subagent's request.
    model.send_last_completion_stream_text_chunk("subagent task response");
    model.end_last_completion_stream();

    cx.run_until_parked();

    assert_eq!(
        subagent_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
        indoc! {"
            ## User

            subagent task prompt

            ## Assistant

            subagent task response

        "}
    );

    // Parent model responds after receiving the tool result, ending the turn.
    model.send_last_completion_stream_text_chunk("Response");
    model.end_last_completion_stream();

    send.await.unwrap();

    assert_eq!(
        acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
        indoc! {r#"
            ## User

            Prompt

            ## Assistant

            spawning subagent

            **Tool Call: label**
            Status: Completed

            subagent task response

            ## Assistant

            Response

        "#},
    );
}
4808
/// Verifies that a subagent's `<thinking>` content appears in the subagent's
/// own transcript but is stripped from the tool output surfaced to the parent
/// thread.
#[gpui::test]
async fn test_subagent_tool_output_does_not_include_thinking(cx: &mut TestAppContext) {
    init_test(cx);
    cx.update(|cx| {
        LanguageModelRegistry::test(cx);
    });
    // Subagents are behind a feature flag; enable it for this test.
    cx.update(|cx| {
        cx.update_flags(true, vec!["subagents".to_string()]);
    });

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/",
        json!({
            "a": {
                "b.md": "Lorem"
            }
        }),
    )
    .await;
    let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
    let thread_store = cx.new(|cx| ThreadStore::new(cx));
    let agent = cx.update(|cx| {
        NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
    });
    let connection = Rc::new(NativeAgentConnection(agent.clone()));

    let acp_thread = cx
        .update(|cx| {
            connection
                .clone()
                .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
        })
        .await
        .unwrap();
    let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
    let thread = agent.read_with(cx, |agent, _| {
        agent.sessions.get(&session_id).unwrap().thread.clone()
    });
    let model = Arc::new(FakeLanguageModel::default());

    // Install a fake model so the test can script completions deterministically.
    thread.update(cx, |thread, cx| {
        thread.set_model(model.clone(), cx);
    });
    cx.run_until_parked();

    let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
    cx.run_until_parked();
    // Parent turn: stream text, then spawn a subagent via tool call.
    model.send_last_completion_stream_text_chunk("spawning subagent");
    let subagent_tool_input = SpawnAgentToolInput {
        label: "label".to_string(),
        message: "subagent task prompt".to_string(),
        session_id: None,
    };
    let subagent_tool_use = LanguageModelToolUse {
        id: "subagent_1".into(),
        name: SpawnAgentTool::NAME.into(),
        raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
        input: serde_json::to_value(&subagent_tool_input).unwrap(),
        is_input_complete: true,
        thought_signature: None,
    };
    model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        subagent_tool_use,
    ));
    model.end_last_completion_stream();

    cx.run_until_parked();

    let subagent_session_id = thread.read_with(cx, |thread, cx| {
        thread
            .running_subagent_ids(cx)
            .get(0)
            .expect("subagent thread should be running")
            .clone()
    });

    let subagent_thread = agent.read_with(cx, |agent, _cx| {
        agent
            .sessions
            .get(&subagent_session_id)
            .expect("subagent session should exist")
            .acp_thread
            .clone()
    });

    // Subagent turn: interleave a Thinking event between two text chunks.
    model.send_last_completion_stream_text_chunk("subagent task response 1");
    model.send_last_completion_stream_event(LanguageModelCompletionEvent::Thinking {
        text: "thinking more about the subagent task".into(),
        signature: None,
    });
    model.send_last_completion_stream_text_chunk("subagent task response 2");
    model.end_last_completion_stream();

    cx.run_until_parked();

    // The subagent's own transcript retains the thinking block.
    assert_eq!(
        subagent_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
        indoc! {"
            ## User

            subagent task prompt

            ## Assistant

            subagent task response 1

            <thinking>
            thinking more about the subagent task
            </thinking>

            subagent task response 2

        "}
    );

    model.send_last_completion_stream_text_chunk("Response");
    model.end_last_completion_stream();

    send.await.unwrap();

    // The parent's tool-call output contains only the text chunks — the
    // thinking block is excluded.
    assert_eq!(
        acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
        indoc! {r#"
            ## User

            Prompt

            ## Assistant

            spawning subagent

            **Tool Call: label**
            Status: Completed

            subagent task response 1

            subagent task response 2

            ## Assistant

            Response

        "#},
    );
}
4956
/// Verifies cancellation while a subagent is still working: canceling the
/// parent thread marks the spawn tool call as canceled and leaves both the
/// parent and the subagent threads idle, with the subagent having produced no
/// assistant output.
#[gpui::test]
async fn test_subagent_tool_call_cancellation_during_task_prompt(cx: &mut TestAppContext) {
    init_test(cx);
    cx.update(|cx| {
        LanguageModelRegistry::test(cx);
    });
    // Subagents are behind a feature flag; enable it for this test.
    cx.update(|cx| {
        cx.update_flags(true, vec!["subagents".to_string()]);
    });

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/",
        json!({
            "a": {
                "b.md": "Lorem"
            }
        }),
    )
    .await;
    let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
    let thread_store = cx.new(|cx| ThreadStore::new(cx));
    let agent = cx.update(|cx| {
        NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
    });
    let connection = Rc::new(NativeAgentConnection(agent.clone()));

    let acp_thread = cx
        .update(|cx| {
            connection
                .clone()
                .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
        })
        .await
        .unwrap();
    let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
    let thread = agent.read_with(cx, |agent, _| {
        agent.sessions.get(&session_id).unwrap().thread.clone()
    });
    let model = Arc::new(FakeLanguageModel::default());

    // Install a fake model so the test can script completions deterministically.
    thread.update(cx, |thread, cx| {
        thread.set_model(model.clone(), cx);
    });
    cx.run_until_parked();

    let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
    cx.run_until_parked();
    // Parent turn: stream text, then spawn a subagent via tool call.
    model.send_last_completion_stream_text_chunk("spawning subagent");
    let subagent_tool_input = SpawnAgentToolInput {
        label: "label".to_string(),
        message: "subagent task prompt".to_string(),
        session_id: None,
    };
    let subagent_tool_use = LanguageModelToolUse {
        id: "subagent_1".into(),
        name: SpawnAgentTool::NAME.into(),
        raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
        input: serde_json::to_value(&subagent_tool_input).unwrap(),
        is_input_complete: true,
        thought_signature: None,
    };
    model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        subagent_tool_use,
    ));
    model.end_last_completion_stream();

    cx.run_until_parked();

    let subagent_session_id = thread.read_with(cx, |thread, cx| {
        thread
            .running_subagent_ids(cx)
            .get(0)
            .expect("subagent thread should be running")
            .clone()
    });
    let subagent_acp_thread = agent.read_with(cx, |agent, _cx| {
        agent
            .sessions
            .get(&subagent_session_id)
            .expect("subagent session should exist")
            .acp_thread
            .clone()
    });

    // Deliberately do NOT let the subagent's model respond: we cancel while
    // the subagent's task prompt is still in flight.

    acp_thread.update(cx, |thread, cx| thread.cancel(cx)).await;

    cx.run_until_parked();

    send.await.unwrap();

    acp_thread.read_with(cx, |thread, cx| {
        assert_eq!(thread.status(), ThreadStatus::Idle);
        assert_eq!(
            thread.to_markdown(cx),
            indoc! {"
                ## User

                Prompt

                ## Assistant

                spawning subagent

                **Tool Call: label**
                Status: Canceled

            "}
        );
    });
    // The subagent thread is also idle and never produced assistant output.
    subagent_acp_thread.read_with(cx, |thread, cx| {
        assert_eq!(thread.status(), ThreadStatus::Idle);
        assert_eq!(
            thread.to_markdown(cx),
            indoc! {"
                ## User

                subagent task prompt

            "}
        );
    });
}
5086
/// Verifies resuming a subagent session: a second `SpawnAgentTool` call that
/// passes the earlier session id re-activates the same subagent session, and
/// the subagent's transcript accumulates both conversation turns.
#[gpui::test]
async fn test_subagent_tool_resume_session(cx: &mut TestAppContext) {
    init_test(cx);
    cx.update(|cx| {
        LanguageModelRegistry::test(cx);
    });
    // Subagents are behind a feature flag; enable it for this test.
    cx.update(|cx| {
        cx.update_flags(true, vec!["subagents".to_string()]);
    });

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/",
        json!({
            "a": {
                "b.md": "Lorem"
            }
        }),
    )
    .await;
    let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
    let thread_store = cx.new(|cx| ThreadStore::new(cx));
    let agent = cx.update(|cx| {
        NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
    });
    let connection = Rc::new(NativeAgentConnection(agent.clone()));

    let acp_thread = cx
        .update(|cx| {
            connection
                .clone()
                .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
        })
        .await
        .unwrap();
    let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
    let thread = agent.read_with(cx, |agent, _| {
        agent.sessions.get(&session_id).unwrap().thread.clone()
    });
    let model = Arc::new(FakeLanguageModel::default());

    // Install a fake model so the test can script completions deterministically.
    thread.update(cx, |thread, cx| {
        thread.set_model(model.clone(), cx);
    });
    cx.run_until_parked();

    // === First turn: create subagent ===
    let send = acp_thread.update(cx, |thread, cx| thread.send_raw("First prompt", cx));
    cx.run_until_parked();
    model.send_last_completion_stream_text_chunk("spawning subagent");
    let subagent_tool_input = SpawnAgentToolInput {
        label: "initial task".to_string(),
        message: "do the first task".to_string(),
        session_id: None,
    };
    let subagent_tool_use = LanguageModelToolUse {
        id: "subagent_1".into(),
        name: SpawnAgentTool::NAME.into(),
        raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
        input: serde_json::to_value(&subagent_tool_input).unwrap(),
        is_input_complete: true,
        thought_signature: None,
    };
    model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        subagent_tool_use,
    ));
    model.end_last_completion_stream();

    cx.run_until_parked();

    let subagent_session_id = thread.read_with(cx, |thread, cx| {
        thread
            .running_subagent_ids(cx)
            .get(0)
            .expect("subagent thread should be running")
            .clone()
    });

    let subagent_acp_thread = agent.read_with(cx, |agent, _cx| {
        agent
            .sessions
            .get(&subagent_session_id)
            .expect("subagent session should exist")
            .acp_thread
            .clone()
    });

    // Subagent responds
    model.send_last_completion_stream_text_chunk("first task response");
    model.end_last_completion_stream();

    cx.run_until_parked();

    // Parent model responds to complete first turn
    model.send_last_completion_stream_text_chunk("First response");
    model.end_last_completion_stream();

    send.await.unwrap();

    // Verify subagent is no longer running
    thread.read_with(cx, |thread, cx| {
        assert!(
            thread.running_subagent_ids(cx).is_empty(),
            "subagent should not be running after completion"
        );
    });

    // === Second turn: resume subagent with session_id ===
    let send2 = acp_thread.update(cx, |thread, cx| thread.send_raw("Follow up", cx));
    cx.run_until_parked();
    model.send_last_completion_stream_text_chunk("resuming subagent");
    // Passing the previous session_id asks the tool to resume that session
    // instead of creating a new one.
    let resume_tool_input = SpawnAgentToolInput {
        label: "follow-up task".to_string(),
        message: "do the follow-up task".to_string(),
        session_id: Some(subagent_session_id.clone()),
    };
    let resume_tool_use = LanguageModelToolUse {
        id: "subagent_2".into(),
        name: SpawnAgentTool::NAME.into(),
        raw_input: serde_json::to_string(&resume_tool_input).unwrap(),
        input: serde_json::to_value(&resume_tool_input).unwrap(),
        is_input_complete: true,
        thought_signature: None,
    };
    model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(resume_tool_use));
    model.end_last_completion_stream();

    cx.run_until_parked();

    // Subagent should be running again with the same session
    thread.read_with(cx, |thread, cx| {
        let running = thread.running_subagent_ids(cx);
        assert_eq!(running.len(), 1, "subagent should be running");
        assert_eq!(running[0], subagent_session_id, "should be same session");
    });

    // Subagent responds to follow-up
    model.send_last_completion_stream_text_chunk("follow-up task response");
    model.end_last_completion_stream();

    cx.run_until_parked();

    // Parent model responds to complete second turn
    model.send_last_completion_stream_text_chunk("Second response");
    model.end_last_completion_stream();

    send2.await.unwrap();

    // Verify subagent is no longer running
    thread.read_with(cx, |thread, cx| {
        assert!(
            thread.running_subagent_ids(cx).is_empty(),
            "subagent should not be running after resume completion"
        );
    });

    // Verify the subagent's acp thread has both conversation turns
    assert_eq!(
        subagent_acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
        indoc! {"
            ## User

            do the first task

            ## Assistant

            first task response

            ## User

            do the follow-up task

            ## Assistant

            follow-up task response

        "}
    );
}
5266
5267#[gpui::test]
5268async fn test_subagent_thread_inherits_parent_thread_properties(cx: &mut TestAppContext) {
5269 init_test(cx);
5270
5271 cx.update(|cx| {
5272 cx.update_flags(true, vec!["subagents".to_string()]);
5273 });
5274
5275 let fs = FakeFs::new(cx.executor());
5276 fs.insert_tree(path!("/test"), json!({})).await;
5277 let project = Project::test(fs, [path!("/test").as_ref()], cx).await;
5278 let project_context = cx.new(|_cx| ProjectContext::default());
5279 let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
5280 let context_server_registry =
5281 cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
5282 let model = Arc::new(FakeLanguageModel::default());
5283
5284 let parent_thread = cx.new(|cx| {
5285 Thread::new(
5286 project.clone(),
5287 project_context,
5288 context_server_registry,
5289 Templates::new(),
5290 Some(model.clone()),
5291 cx,
5292 )
5293 });
5294
5295 let subagent_thread = cx.new(|cx| Thread::new_subagent(&parent_thread, cx));
5296 subagent_thread.read_with(cx, |subagent_thread, cx| {
5297 assert!(subagent_thread.is_subagent());
5298 assert_eq!(subagent_thread.depth(), 1);
5299 assert_eq!(
5300 subagent_thread.model().map(|model| model.id()),
5301 Some(model.id())
5302 );
5303 assert_eq!(
5304 subagent_thread.parent_thread_id(),
5305 Some(parent_thread.read(cx).id().clone())
5306 );
5307
5308 let request = subagent_thread
5309 .build_completion_request(CompletionIntent::UserPrompt, cx)
5310 .unwrap();
5311 assert_eq!(request.intent, Some(CompletionIntent::Subagent));
5312 });
5313}
5314
5315#[gpui::test]
5316async fn test_max_subagent_depth_prevents_tool_registration(cx: &mut TestAppContext) {
5317 init_test(cx);
5318
5319 cx.update(|cx| {
5320 cx.update_flags(true, vec!["subagents".to_string()]);
5321 });
5322
5323 let fs = FakeFs::new(cx.executor());
5324 fs.insert_tree(path!("/test"), json!({})).await;
5325 let project = Project::test(fs, [path!("/test").as_ref()], cx).await;
5326 let project_context = cx.new(|_cx| ProjectContext::default());
5327 let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
5328 let context_server_registry =
5329 cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
5330 let model = Arc::new(FakeLanguageModel::default());
5331 let environment = Rc::new(cx.update(|cx| {
5332 FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
5333 }));
5334
5335 let deep_parent_thread = cx.new(|cx| {
5336 let mut thread = Thread::new(
5337 project.clone(),
5338 project_context,
5339 context_server_registry,
5340 Templates::new(),
5341 Some(model.clone()),
5342 cx,
5343 );
5344 thread.set_subagent_context(SubagentContext {
5345 parent_thread_id: agent_client_protocol::SessionId::new("parent-id"),
5346 depth: MAX_SUBAGENT_DEPTH - 1,
5347 });
5348 thread
5349 });
5350 let deep_subagent_thread = cx.new(|cx| {
5351 let mut thread = Thread::new_subagent(&deep_parent_thread, cx);
5352 thread.add_default_tools(environment, cx);
5353 thread
5354 });
5355
5356 deep_subagent_thread.read_with(cx, |thread, _| {
5357 assert_eq!(thread.depth(), MAX_SUBAGENT_DEPTH);
5358 assert!(
5359 !thread.has_registered_tool(SpawnAgentTool::NAME),
5360 "subagent tool should not be present at max depth"
5361 );
5362 });
5363}
5364
5365#[gpui::test]
5366async fn test_parent_cancel_stops_subagent(cx: &mut TestAppContext) {
5367 init_test(cx);
5368
5369 cx.update(|cx| {
5370 cx.update_flags(true, vec!["subagents".to_string()]);
5371 });
5372
5373 let fs = FakeFs::new(cx.executor());
5374 fs.insert_tree(path!("/test"), json!({})).await;
5375 let project = Project::test(fs, [path!("/test").as_ref()], cx).await;
5376 let project_context = cx.new(|_cx| ProjectContext::default());
5377 let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
5378 let context_server_registry =
5379 cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
5380 let model = Arc::new(FakeLanguageModel::default());
5381
5382 let parent = cx.new(|cx| {
5383 Thread::new(
5384 project.clone(),
5385 project_context.clone(),
5386 context_server_registry.clone(),
5387 Templates::new(),
5388 Some(model.clone()),
5389 cx,
5390 )
5391 });
5392
5393 let subagent = cx.new(|cx| Thread::new_subagent(&parent, cx));
5394
5395 parent.update(cx, |thread, _cx| {
5396 thread.register_running_subagent(subagent.downgrade());
5397 });
5398
5399 subagent
5400 .update(cx, |thread, cx| {
5401 thread.send(UserMessageId::new(), ["Do work".to_string()], cx)
5402 })
5403 .unwrap();
5404 cx.run_until_parked();
5405
5406 subagent.read_with(cx, |thread, _| {
5407 assert!(!thread.is_turn_complete(), "subagent should be running");
5408 });
5409
5410 parent.update(cx, |thread, cx| {
5411 thread.cancel(cx).detach();
5412 });
5413
5414 subagent.read_with(cx, |thread, _| {
5415 assert!(
5416 thread.is_turn_complete(),
5417 "subagent should be cancelled when parent cancels"
5418 );
5419 });
5420}
5421
5422#[gpui::test]
5423async fn test_subagent_context_window_warning(cx: &mut TestAppContext) {
5424 init_test(cx);
5425 cx.update(|cx| {
5426 LanguageModelRegistry::test(cx);
5427 });
5428 cx.update(|cx| {
5429 cx.update_flags(true, vec!["subagents".to_string()]);
5430 });
5431
5432 let fs = FakeFs::new(cx.executor());
5433 fs.insert_tree(
5434 "/",
5435 json!({
5436 "a": {
5437 "b.md": "Lorem"
5438 }
5439 }),
5440 )
5441 .await;
5442 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
5443 let thread_store = cx.new(|cx| ThreadStore::new(cx));
5444 let agent = cx.update(|cx| {
5445 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
5446 });
5447 let connection = Rc::new(NativeAgentConnection(agent.clone()));
5448
5449 let acp_thread = cx
5450 .update(|cx| {
5451 connection
5452 .clone()
5453 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
5454 })
5455 .await
5456 .unwrap();
5457 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
5458 let thread = agent.read_with(cx, |agent, _| {
5459 agent.sessions.get(&session_id).unwrap().thread.clone()
5460 });
5461 let model = Arc::new(FakeLanguageModel::default());
5462
5463 thread.update(cx, |thread, cx| {
5464 thread.set_model(model.clone(), cx);
5465 });
5466 cx.run_until_parked();
5467
5468 // Start the parent turn
5469 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
5470 cx.run_until_parked();
5471 model.send_last_completion_stream_text_chunk("spawning subagent");
5472 let subagent_tool_input = SpawnAgentToolInput {
5473 label: "label".to_string(),
5474 message: "subagent task prompt".to_string(),
5475 session_id: None,
5476 };
5477 let subagent_tool_use = LanguageModelToolUse {
5478 id: "subagent_1".into(),
5479 name: SpawnAgentTool::NAME.into(),
5480 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
5481 input: serde_json::to_value(&subagent_tool_input).unwrap(),
5482 is_input_complete: true,
5483 thought_signature: None,
5484 };
5485 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
5486 subagent_tool_use,
5487 ));
5488 model.end_last_completion_stream();
5489
5490 cx.run_until_parked();
5491
5492 // Verify subagent is running
5493 let subagent_session_id = thread.read_with(cx, |thread, cx| {
5494 thread
5495 .running_subagent_ids(cx)
5496 .get(0)
5497 .expect("subagent thread should be running")
5498 .clone()
5499 });
5500
5501 // Send a usage update that crosses the warning threshold (80% of 1,000,000)
5502 model.send_last_completion_stream_text_chunk("partial work");
5503 model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
5504 TokenUsage {
5505 input_tokens: 850_000,
5506 output_tokens: 0,
5507 cache_creation_input_tokens: 0,
5508 cache_read_input_tokens: 0,
5509 },
5510 ));
5511
5512 cx.run_until_parked();
5513
5514 // The subagent should no longer be running
5515 thread.read_with(cx, |thread, cx| {
5516 assert!(
5517 thread.running_subagent_ids(cx).is_empty(),
5518 "subagent should be stopped after context window warning"
5519 );
5520 });
5521
5522 // The parent model should get a new completion request to respond to the tool error
5523 model.send_last_completion_stream_text_chunk("Response after warning");
5524 model.end_last_completion_stream();
5525
5526 send.await.unwrap();
5527
5528 // Verify the parent thread shows the warning error in the tool call
5529 let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
5530 assert!(
5531 markdown.contains("nearing the end of its context window"),
5532 "tool output should contain context window warning message, got:\n{markdown}"
5533 );
5534 assert!(
5535 markdown.contains("Status: Failed"),
5536 "tool call should have Failed status, got:\n{markdown}"
5537 );
5538
5539 // Verify the subagent session still exists (can be resumed)
5540 agent.read_with(cx, |agent, _cx| {
5541 assert!(
5542 agent.sessions.contains_key(&subagent_session_id),
5543 "subagent session should still exist for potential resume"
5544 );
5545 });
5546}
5547
5548#[gpui::test]
5549async fn test_subagent_no_context_window_warning_when_already_at_warning(cx: &mut TestAppContext) {
5550 init_test(cx);
5551 cx.update(|cx| {
5552 LanguageModelRegistry::test(cx);
5553 });
5554 cx.update(|cx| {
5555 cx.update_flags(true, vec!["subagents".to_string()]);
5556 });
5557
5558 let fs = FakeFs::new(cx.executor());
5559 fs.insert_tree(
5560 "/",
5561 json!({
5562 "a": {
5563 "b.md": "Lorem"
5564 }
5565 }),
5566 )
5567 .await;
5568 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
5569 let thread_store = cx.new(|cx| ThreadStore::new(cx));
5570 let agent = cx.update(|cx| {
5571 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
5572 });
5573 let connection = Rc::new(NativeAgentConnection(agent.clone()));
5574
5575 let acp_thread = cx
5576 .update(|cx| {
5577 connection
5578 .clone()
5579 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
5580 })
5581 .await
5582 .unwrap();
5583 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
5584 let thread = agent.read_with(cx, |agent, _| {
5585 agent.sessions.get(&session_id).unwrap().thread.clone()
5586 });
5587 let model = Arc::new(FakeLanguageModel::default());
5588
5589 thread.update(cx, |thread, cx| {
5590 thread.set_model(model.clone(), cx);
5591 });
5592 cx.run_until_parked();
5593
5594 // === First turn: create subagent, trigger context window warning ===
5595 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("First prompt", cx));
5596 cx.run_until_parked();
5597 model.send_last_completion_stream_text_chunk("spawning subagent");
5598 let subagent_tool_input = SpawnAgentToolInput {
5599 label: "initial task".to_string(),
5600 message: "do the first task".to_string(),
5601 session_id: None,
5602 };
5603 let subagent_tool_use = LanguageModelToolUse {
5604 id: "subagent_1".into(),
5605 name: SpawnAgentTool::NAME.into(),
5606 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
5607 input: serde_json::to_value(&subagent_tool_input).unwrap(),
5608 is_input_complete: true,
5609 thought_signature: None,
5610 };
5611 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
5612 subagent_tool_use,
5613 ));
5614 model.end_last_completion_stream();
5615
5616 cx.run_until_parked();
5617
5618 let subagent_session_id = thread.read_with(cx, |thread, cx| {
5619 thread
5620 .running_subagent_ids(cx)
5621 .get(0)
5622 .expect("subagent thread should be running")
5623 .clone()
5624 });
5625
5626 // Subagent sends a usage update that crosses the warning threshold.
5627 // This triggers Normal→Warning, stopping the subagent.
5628 model.send_last_completion_stream_text_chunk("partial work");
5629 model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
5630 TokenUsage {
5631 input_tokens: 850_000,
5632 output_tokens: 0,
5633 cache_creation_input_tokens: 0,
5634 cache_read_input_tokens: 0,
5635 },
5636 ));
5637
5638 cx.run_until_parked();
5639
5640 // Verify the first turn was stopped with a context window warning
5641 thread.read_with(cx, |thread, cx| {
5642 assert!(
5643 thread.running_subagent_ids(cx).is_empty(),
5644 "subagent should be stopped after context window warning"
5645 );
5646 });
5647
5648 // Parent model responds to complete first turn
5649 model.send_last_completion_stream_text_chunk("First response");
5650 model.end_last_completion_stream();
5651
5652 send.await.unwrap();
5653
5654 let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
5655 assert!(
5656 markdown.contains("nearing the end of its context window"),
5657 "first turn should have context window warning, got:\n{markdown}"
5658 );
5659
5660 // === Second turn: resume the same subagent (now at Warning level) ===
5661 let send2 = acp_thread.update(cx, |thread, cx| thread.send_raw("Follow up", cx));
5662 cx.run_until_parked();
5663 model.send_last_completion_stream_text_chunk("resuming subagent");
5664 let resume_tool_input = SpawnAgentToolInput {
5665 label: "follow-up task".to_string(),
5666 message: "do the follow-up task".to_string(),
5667 session_id: Some(subagent_session_id.clone()),
5668 };
5669 let resume_tool_use = LanguageModelToolUse {
5670 id: "subagent_2".into(),
5671 name: SpawnAgentTool::NAME.into(),
5672 raw_input: serde_json::to_string(&resume_tool_input).unwrap(),
5673 input: serde_json::to_value(&resume_tool_input).unwrap(),
5674 is_input_complete: true,
5675 thought_signature: None,
5676 };
5677 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(resume_tool_use));
5678 model.end_last_completion_stream();
5679
5680 cx.run_until_parked();
5681
5682 // Subagent responds with tokens still at warning level (no worse).
5683 // Since ratio_before_prompt was already Warning, this should NOT
5684 // trigger the context window warning again.
5685 model.send_last_completion_stream_text_chunk("follow-up task response");
5686 model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
5687 TokenUsage {
5688 input_tokens: 870_000,
5689 output_tokens: 0,
5690 cache_creation_input_tokens: 0,
5691 cache_read_input_tokens: 0,
5692 },
5693 ));
5694 model.end_last_completion_stream();
5695
5696 cx.run_until_parked();
5697
5698 // Parent model responds to complete second turn
5699 model.send_last_completion_stream_text_chunk("Second response");
5700 model.end_last_completion_stream();
5701
5702 send2.await.unwrap();
5703
5704 // The resumed subagent should have completed normally since the ratio
5705 // didn't transition (it was Warning before and stayed at Warning)
5706 let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
5707 assert!(
5708 markdown.contains("follow-up task response"),
5709 "resumed subagent should complete normally when already at warning, got:\n{markdown}"
5710 );
5711 // The second tool call should NOT have a context window warning
5712 let second_tool_pos = markdown
5713 .find("follow-up task")
5714 .expect("should find follow-up tool call");
5715 let after_second_tool = &markdown[second_tool_pos..];
5716 assert!(
5717 !after_second_tool.contains("nearing the end of its context window"),
5718 "should NOT contain context window warning for resumed subagent at same level, got:\n{after_second_tool}"
5719 );
5720}
5721
5722#[gpui::test]
5723async fn test_subagent_error_propagation(cx: &mut TestAppContext) {
5724 init_test(cx);
5725 cx.update(|cx| {
5726 LanguageModelRegistry::test(cx);
5727 });
5728 cx.update(|cx| {
5729 cx.update_flags(true, vec!["subagents".to_string()]);
5730 });
5731
5732 let fs = FakeFs::new(cx.executor());
5733 fs.insert_tree(
5734 "/",
5735 json!({
5736 "a": {
5737 "b.md": "Lorem"
5738 }
5739 }),
5740 )
5741 .await;
5742 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
5743 let thread_store = cx.new(|cx| ThreadStore::new(cx));
5744 let agent = cx.update(|cx| {
5745 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
5746 });
5747 let connection = Rc::new(NativeAgentConnection(agent.clone()));
5748
5749 let acp_thread = cx
5750 .update(|cx| {
5751 connection
5752 .clone()
5753 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
5754 })
5755 .await
5756 .unwrap();
5757 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
5758 let thread = agent.read_with(cx, |agent, _| {
5759 agent.sessions.get(&session_id).unwrap().thread.clone()
5760 });
5761 let model = Arc::new(FakeLanguageModel::default());
5762
5763 thread.update(cx, |thread, cx| {
5764 thread.set_model(model.clone(), cx);
5765 });
5766 cx.run_until_parked();
5767
5768 // Start the parent turn
5769 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
5770 cx.run_until_parked();
5771 model.send_last_completion_stream_text_chunk("spawning subagent");
5772 let subagent_tool_input = SpawnAgentToolInput {
5773 label: "label".to_string(),
5774 message: "subagent task prompt".to_string(),
5775 session_id: None,
5776 };
5777 let subagent_tool_use = LanguageModelToolUse {
5778 id: "subagent_1".into(),
5779 name: SpawnAgentTool::NAME.into(),
5780 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
5781 input: serde_json::to_value(&subagent_tool_input).unwrap(),
5782 is_input_complete: true,
5783 thought_signature: None,
5784 };
5785 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
5786 subagent_tool_use,
5787 ));
5788 model.end_last_completion_stream();
5789
5790 cx.run_until_parked();
5791
5792 // Verify subagent is running
5793 thread.read_with(cx, |thread, cx| {
5794 assert!(
5795 !thread.running_subagent_ids(cx).is_empty(),
5796 "subagent should be running"
5797 );
5798 });
5799
5800 // The subagent's model returns a non-retryable error
5801 model.send_last_completion_stream_error(LanguageModelCompletionError::PromptTooLarge {
5802 tokens: None,
5803 });
5804
5805 cx.run_until_parked();
5806
5807 // The subagent should no longer be running
5808 thread.read_with(cx, |thread, cx| {
5809 assert!(
5810 thread.running_subagent_ids(cx).is_empty(),
5811 "subagent should not be running after error"
5812 );
5813 });
5814
5815 // The parent model should get a new completion request to respond to the tool error
5816 model.send_last_completion_stream_text_chunk("Response after error");
5817 model.end_last_completion_stream();
5818
5819 send.await.unwrap();
5820
5821 // Verify the parent thread shows the error in the tool call
5822 let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
5823 assert!(
5824 markdown.contains("Status: Failed"),
5825 "tool call should have Failed status after model error, got:\n{markdown}"
5826 );
5827}
5828
5829#[gpui::test]
5830async fn test_edit_file_tool_deny_rule_blocks_edit(cx: &mut TestAppContext) {
5831 init_test(cx);
5832
5833 let fs = FakeFs::new(cx.executor());
5834 fs.insert_tree("/root", json!({"sensitive_config.txt": "secret data"}))
5835 .await;
5836 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
5837
5838 cx.update(|cx| {
5839 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
5840 settings.tool_permissions.tools.insert(
5841 EditFileTool::NAME.into(),
5842 agent_settings::ToolRules {
5843 default: Some(settings::ToolPermissionMode::Allow),
5844 always_allow: vec![],
5845 always_deny: vec![agent_settings::CompiledRegex::new(r"sensitive", false).unwrap()],
5846 always_confirm: vec![],
5847 invalid_patterns: vec![],
5848 },
5849 );
5850 agent_settings::AgentSettings::override_global(settings, cx);
5851 });
5852
5853 let context_server_registry =
5854 cx.new(|cx| crate::ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
5855 let language_registry = project.read_with(cx, |project, _cx| project.languages().clone());
5856 let templates = crate::Templates::new();
5857 let thread = cx.new(|cx| {
5858 crate::Thread::new(
5859 project.clone(),
5860 cx.new(|_cx| prompt_store::ProjectContext::default()),
5861 context_server_registry,
5862 templates.clone(),
5863 None,
5864 cx,
5865 )
5866 });
5867
5868 #[allow(clippy::arc_with_non_send_sync)]
5869 let tool = Arc::new(crate::EditFileTool::new(
5870 project.clone(),
5871 thread.downgrade(),
5872 language_registry,
5873 templates,
5874 ));
5875 let (event_stream, _rx) = crate::ToolCallEventStream::test();
5876
5877 let task = cx.update(|cx| {
5878 tool.run(
5879 ToolInput::resolved(crate::EditFileToolInput {
5880 display_description: "Edit sensitive file".to_string(),
5881 path: "root/sensitive_config.txt".into(),
5882 mode: crate::EditFileMode::Edit,
5883 }),
5884 event_stream,
5885 cx,
5886 )
5887 });
5888
5889 let result = task.await;
5890 assert!(result.is_err(), "expected edit to be blocked");
5891 assert!(
5892 result.unwrap_err().to_string().contains("blocked"),
5893 "error should mention the edit was blocked"
5894 );
5895}
5896
5897#[gpui::test]
5898async fn test_delete_path_tool_deny_rule_blocks_deletion(cx: &mut TestAppContext) {
5899 init_test(cx);
5900
5901 let fs = FakeFs::new(cx.executor());
5902 fs.insert_tree("/root", json!({"important_data.txt": "critical info"}))
5903 .await;
5904 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
5905
5906 cx.update(|cx| {
5907 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
5908 settings.tool_permissions.tools.insert(
5909 DeletePathTool::NAME.into(),
5910 agent_settings::ToolRules {
5911 default: Some(settings::ToolPermissionMode::Allow),
5912 always_allow: vec![],
5913 always_deny: vec![agent_settings::CompiledRegex::new(r"important", false).unwrap()],
5914 always_confirm: vec![],
5915 invalid_patterns: vec![],
5916 },
5917 );
5918 agent_settings::AgentSettings::override_global(settings, cx);
5919 });
5920
5921 let action_log = cx.new(|_cx| action_log::ActionLog::new(project.clone()));
5922
5923 #[allow(clippy::arc_with_non_send_sync)]
5924 let tool = Arc::new(crate::DeletePathTool::new(project, action_log));
5925 let (event_stream, _rx) = crate::ToolCallEventStream::test();
5926
5927 let task = cx.update(|cx| {
5928 tool.run(
5929 ToolInput::resolved(crate::DeletePathToolInput {
5930 path: "root/important_data.txt".to_string(),
5931 }),
5932 event_stream,
5933 cx,
5934 )
5935 });
5936
5937 let result = task.await;
5938 assert!(result.is_err(), "expected deletion to be blocked");
5939 assert!(
5940 result.unwrap_err().contains("blocked"),
5941 "error should mention the deletion was blocked"
5942 );
5943}
5944
5945#[gpui::test]
5946async fn test_move_path_tool_denies_if_destination_denied(cx: &mut TestAppContext) {
5947 init_test(cx);
5948
5949 let fs = FakeFs::new(cx.executor());
5950 fs.insert_tree(
5951 "/root",
5952 json!({
5953 "safe.txt": "content",
5954 "protected": {}
5955 }),
5956 )
5957 .await;
5958 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
5959
5960 cx.update(|cx| {
5961 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
5962 settings.tool_permissions.tools.insert(
5963 MovePathTool::NAME.into(),
5964 agent_settings::ToolRules {
5965 default: Some(settings::ToolPermissionMode::Allow),
5966 always_allow: vec![],
5967 always_deny: vec![agent_settings::CompiledRegex::new(r"protected", false).unwrap()],
5968 always_confirm: vec![],
5969 invalid_patterns: vec![],
5970 },
5971 );
5972 agent_settings::AgentSettings::override_global(settings, cx);
5973 });
5974
5975 #[allow(clippy::arc_with_non_send_sync)]
5976 let tool = Arc::new(crate::MovePathTool::new(project));
5977 let (event_stream, _rx) = crate::ToolCallEventStream::test();
5978
5979 let task = cx.update(|cx| {
5980 tool.run(
5981 ToolInput::resolved(crate::MovePathToolInput {
5982 source_path: "root/safe.txt".to_string(),
5983 destination_path: "root/protected/safe.txt".to_string(),
5984 }),
5985 event_stream,
5986 cx,
5987 )
5988 });
5989
5990 let result = task.await;
5991 assert!(
5992 result.is_err(),
5993 "expected move to be blocked due to destination path"
5994 );
5995 assert!(
5996 result.unwrap_err().contains("blocked"),
5997 "error should mention the move was blocked"
5998 );
5999}
6000
6001#[gpui::test]
6002async fn test_move_path_tool_denies_if_source_denied(cx: &mut TestAppContext) {
6003 init_test(cx);
6004
6005 let fs = FakeFs::new(cx.executor());
6006 fs.insert_tree(
6007 "/root",
6008 json!({
6009 "secret.txt": "secret content",
6010 "public": {}
6011 }),
6012 )
6013 .await;
6014 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
6015
6016 cx.update(|cx| {
6017 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6018 settings.tool_permissions.tools.insert(
6019 MovePathTool::NAME.into(),
6020 agent_settings::ToolRules {
6021 default: Some(settings::ToolPermissionMode::Allow),
6022 always_allow: vec![],
6023 always_deny: vec![agent_settings::CompiledRegex::new(r"secret", false).unwrap()],
6024 always_confirm: vec![],
6025 invalid_patterns: vec![],
6026 },
6027 );
6028 agent_settings::AgentSettings::override_global(settings, cx);
6029 });
6030
6031 #[allow(clippy::arc_with_non_send_sync)]
6032 let tool = Arc::new(crate::MovePathTool::new(project));
6033 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6034
6035 let task = cx.update(|cx| {
6036 tool.run(
6037 ToolInput::resolved(crate::MovePathToolInput {
6038 source_path: "root/secret.txt".to_string(),
6039 destination_path: "root/public/not_secret.txt".to_string(),
6040 }),
6041 event_stream,
6042 cx,
6043 )
6044 });
6045
6046 let result = task.await;
6047 assert!(
6048 result.is_err(),
6049 "expected move to be blocked due to source path"
6050 );
6051 assert!(
6052 result.unwrap_err().contains("blocked"),
6053 "error should mention the move was blocked"
6054 );
6055}
6056
6057#[gpui::test]
6058async fn test_copy_path_tool_deny_rule_blocks_copy(cx: &mut TestAppContext) {
6059 init_test(cx);
6060
6061 let fs = FakeFs::new(cx.executor());
6062 fs.insert_tree(
6063 "/root",
6064 json!({
6065 "confidential.txt": "confidential data",
6066 "dest": {}
6067 }),
6068 )
6069 .await;
6070 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
6071
6072 cx.update(|cx| {
6073 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6074 settings.tool_permissions.tools.insert(
6075 CopyPathTool::NAME.into(),
6076 agent_settings::ToolRules {
6077 default: Some(settings::ToolPermissionMode::Allow),
6078 always_allow: vec![],
6079 always_deny: vec![
6080 agent_settings::CompiledRegex::new(r"confidential", false).unwrap(),
6081 ],
6082 always_confirm: vec![],
6083 invalid_patterns: vec![],
6084 },
6085 );
6086 agent_settings::AgentSettings::override_global(settings, cx);
6087 });
6088
6089 #[allow(clippy::arc_with_non_send_sync)]
6090 let tool = Arc::new(crate::CopyPathTool::new(project));
6091 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6092
6093 let task = cx.update(|cx| {
6094 tool.run(
6095 ToolInput::resolved(crate::CopyPathToolInput {
6096 source_path: "root/confidential.txt".to_string(),
6097 destination_path: "root/dest/copy.txt".to_string(),
6098 }),
6099 event_stream,
6100 cx,
6101 )
6102 });
6103
6104 let result = task.await;
6105 assert!(result.is_err(), "expected copy to be blocked");
6106 assert!(
6107 result.unwrap_err().contains("blocked"),
6108 "error should mention the copy was blocked"
6109 );
6110}
6111
6112#[gpui::test]
6113async fn test_save_file_tool_denies_if_any_path_denied(cx: &mut TestAppContext) {
6114 init_test(cx);
6115
6116 let fs = FakeFs::new(cx.executor());
6117 fs.insert_tree(
6118 "/root",
6119 json!({
6120 "normal.txt": "normal content",
6121 "readonly": {
6122 "config.txt": "readonly content"
6123 }
6124 }),
6125 )
6126 .await;
6127 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
6128
6129 cx.update(|cx| {
6130 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6131 settings.tool_permissions.tools.insert(
6132 SaveFileTool::NAME.into(),
6133 agent_settings::ToolRules {
6134 default: Some(settings::ToolPermissionMode::Allow),
6135 always_allow: vec![],
6136 always_deny: vec![agent_settings::CompiledRegex::new(r"readonly", false).unwrap()],
6137 always_confirm: vec![],
6138 invalid_patterns: vec![],
6139 },
6140 );
6141 agent_settings::AgentSettings::override_global(settings, cx);
6142 });
6143
6144 #[allow(clippy::arc_with_non_send_sync)]
6145 let tool = Arc::new(crate::SaveFileTool::new(project));
6146 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6147
6148 let task = cx.update(|cx| {
6149 tool.run(
6150 ToolInput::resolved(crate::SaveFileToolInput {
6151 paths: vec![
6152 std::path::PathBuf::from("root/normal.txt"),
6153 std::path::PathBuf::from("root/readonly/config.txt"),
6154 ],
6155 }),
6156 event_stream,
6157 cx,
6158 )
6159 });
6160
6161 let result = task.await;
6162 assert!(
6163 result.is_err(),
6164 "expected save to be blocked due to denied path"
6165 );
6166 assert!(
6167 result.unwrap_err().contains("blocked"),
6168 "error should mention the save was blocked"
6169 );
6170}
6171
6172#[gpui::test]
6173async fn test_save_file_tool_respects_deny_rules(cx: &mut TestAppContext) {
6174 init_test(cx);
6175
6176 let fs = FakeFs::new(cx.executor());
6177 fs.insert_tree("/root", json!({"config.secret": "secret config"}))
6178 .await;
6179 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
6180
6181 cx.update(|cx| {
6182 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6183 settings.tool_permissions.tools.insert(
6184 SaveFileTool::NAME.into(),
6185 agent_settings::ToolRules {
6186 default: Some(settings::ToolPermissionMode::Allow),
6187 always_allow: vec![],
6188 always_deny: vec![agent_settings::CompiledRegex::new(r"\.secret$", false).unwrap()],
6189 always_confirm: vec![],
6190 invalid_patterns: vec![],
6191 },
6192 );
6193 agent_settings::AgentSettings::override_global(settings, cx);
6194 });
6195
6196 #[allow(clippy::arc_with_non_send_sync)]
6197 let tool = Arc::new(crate::SaveFileTool::new(project));
6198 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6199
6200 let task = cx.update(|cx| {
6201 tool.run(
6202 ToolInput::resolved(crate::SaveFileToolInput {
6203 paths: vec![std::path::PathBuf::from("root/config.secret")],
6204 }),
6205 event_stream,
6206 cx,
6207 )
6208 });
6209
6210 let result = task.await;
6211 assert!(result.is_err(), "expected save to be blocked");
6212 assert!(
6213 result.unwrap_err().contains("blocked"),
6214 "error should mention the save was blocked"
6215 );
6216}
6217
6218#[gpui::test]
6219async fn test_web_search_tool_deny_rule_blocks_search(cx: &mut TestAppContext) {
6220 init_test(cx);
6221
6222 cx.update(|cx| {
6223 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6224 settings.tool_permissions.tools.insert(
6225 WebSearchTool::NAME.into(),
6226 agent_settings::ToolRules {
6227 default: Some(settings::ToolPermissionMode::Allow),
6228 always_allow: vec![],
6229 always_deny: vec![
6230 agent_settings::CompiledRegex::new(r"internal\.company", false).unwrap(),
6231 ],
6232 always_confirm: vec![],
6233 invalid_patterns: vec![],
6234 },
6235 );
6236 agent_settings::AgentSettings::override_global(settings, cx);
6237 });
6238
6239 #[allow(clippy::arc_with_non_send_sync)]
6240 let tool = Arc::new(crate::WebSearchTool);
6241 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6242
6243 let input: crate::WebSearchToolInput =
6244 serde_json::from_value(json!({"query": "internal.company.com secrets"})).unwrap();
6245
6246 let task = cx.update(|cx| tool.run(ToolInput::resolved(input), event_stream, cx));
6247
6248 let result = task.await;
6249 assert!(result.is_err(), "expected search to be blocked");
6250 match result.unwrap_err() {
6251 crate::WebSearchToolOutput::Error { error } => {
6252 assert!(
6253 error.contains("blocked"),
6254 "error should mention the search was blocked"
6255 );
6256 }
6257 other => panic!("expected Error variant, got: {other:?}"),
6258 }
6259}
6260
6261#[gpui::test]
6262async fn test_edit_file_tool_allow_rule_skips_confirmation(cx: &mut TestAppContext) {
6263 init_test(cx);
6264
6265 let fs = FakeFs::new(cx.executor());
6266 fs.insert_tree("/root", json!({"README.md": "# Hello"}))
6267 .await;
6268 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
6269
6270 cx.update(|cx| {
6271 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6272 settings.tool_permissions.tools.insert(
6273 EditFileTool::NAME.into(),
6274 agent_settings::ToolRules {
6275 default: Some(settings::ToolPermissionMode::Confirm),
6276 always_allow: vec![agent_settings::CompiledRegex::new(r"\.md$", false).unwrap()],
6277 always_deny: vec![],
6278 always_confirm: vec![],
6279 invalid_patterns: vec![],
6280 },
6281 );
6282 agent_settings::AgentSettings::override_global(settings, cx);
6283 });
6284
6285 let context_server_registry =
6286 cx.new(|cx| crate::ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
6287 let language_registry = project.read_with(cx, |project, _cx| project.languages().clone());
6288 let templates = crate::Templates::new();
6289 let thread = cx.new(|cx| {
6290 crate::Thread::new(
6291 project.clone(),
6292 cx.new(|_cx| prompt_store::ProjectContext::default()),
6293 context_server_registry,
6294 templates.clone(),
6295 None,
6296 cx,
6297 )
6298 });
6299
6300 #[allow(clippy::arc_with_non_send_sync)]
6301 let tool = Arc::new(crate::EditFileTool::new(
6302 project,
6303 thread.downgrade(),
6304 language_registry,
6305 templates,
6306 ));
6307 let (event_stream, mut rx) = crate::ToolCallEventStream::test();
6308
6309 let _task = cx.update(|cx| {
6310 tool.run(
6311 ToolInput::resolved(crate::EditFileToolInput {
6312 display_description: "Edit README".to_string(),
6313 path: "root/README.md".into(),
6314 mode: crate::EditFileMode::Edit,
6315 }),
6316 event_stream,
6317 cx,
6318 )
6319 });
6320
6321 cx.run_until_parked();
6322
6323 let event = rx.try_recv();
6324 assert!(
6325 !matches!(event, Ok(Ok(ThreadEvent::ToolCallAuthorization(_)))),
6326 "expected no authorization request for allowed .md file"
6327 );
6328}
6329
// Even when the global tool-permission default is Allow, editing a file under
// the project's local settings directory (.zed/) must still request
// authorization from the user.
#[gpui::test]
async fn test_edit_file_tool_allow_still_prompts_for_local_settings(cx: &mut TestAppContext) {
    init_test(cx);

    // Project containing both a local settings file and an ordinary file.
    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            ".zed": {
                "settings.json": "{}"
            },
            "README.md": "# Hello"
        }),
    )
    .await;
    let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;

    // Set the GLOBAL default to Allow (no per-tool rules) — the broadest
    // possible grant, so any prompt below must come from the .zed/ special case.
    cx.update(|cx| {
        let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
        settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
        agent_settings::AgentSettings::override_global(settings, cx);
    });

    // Minimal thread wiring needed to construct the edit-file tool.
    let context_server_registry =
        cx.new(|cx| crate::ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
    let language_registry = project.read_with(cx, |project, _cx| project.languages().clone());
    let templates = crate::Templates::new();
    let thread = cx.new(|cx| {
        crate::Thread::new(
            project.clone(),
            cx.new(|_cx| prompt_store::ProjectContext::default()),
            context_server_registry,
            templates.clone(),
            None,
            cx,
        )
    });

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::EditFileTool::new(
        project,
        thread.downgrade(),
        language_registry,
        templates,
    ));

    // Editing a file inside .zed/ should still prompt even with global default: allow,
    // because local settings paths are sensitive and require confirmation regardless.
    let (event_stream, mut rx) = crate::ToolCallEventStream::test();
    let _task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::EditFileToolInput {
                display_description: "Edit local settings".to_string(),
                path: "root/.zed/settings.json".into(),
                mode: crate::EditFileMode::Edit,
            }),
            event_stream,
            cx,
        )
    });

    // The tool first emits a fields update, then an authorization request;
    // awaiting both asserts that the prompt actually happened.
    let _update = rx.expect_update_fields().await;
    let _auth = rx.expect_authorization().await;
}
6394
6395#[gpui::test]
6396async fn test_fetch_tool_deny_rule_blocks_url(cx: &mut TestAppContext) {
6397 init_test(cx);
6398
6399 cx.update(|cx| {
6400 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6401 settings.tool_permissions.tools.insert(
6402 FetchTool::NAME.into(),
6403 agent_settings::ToolRules {
6404 default: Some(settings::ToolPermissionMode::Allow),
6405 always_allow: vec![],
6406 always_deny: vec![
6407 agent_settings::CompiledRegex::new(r"internal\.company\.com", false).unwrap(),
6408 ],
6409 always_confirm: vec![],
6410 invalid_patterns: vec![],
6411 },
6412 );
6413 agent_settings::AgentSettings::override_global(settings, cx);
6414 });
6415
6416 let http_client = gpui::http_client::FakeHttpClient::with_200_response();
6417
6418 #[allow(clippy::arc_with_non_send_sync)]
6419 let tool = Arc::new(crate::FetchTool::new(http_client));
6420 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6421
6422 let input: crate::FetchToolInput =
6423 serde_json::from_value(json!({"url": "https://internal.company.com/api"})).unwrap();
6424
6425 let task = cx.update(|cx| tool.run(ToolInput::resolved(input), event_stream, cx));
6426
6427 let result = task.await;
6428 assert!(result.is_err(), "expected fetch to be blocked");
6429 assert!(
6430 result.unwrap_err().contains("blocked"),
6431 "error should mention the fetch was blocked"
6432 );
6433}
6434
6435#[gpui::test]
6436async fn test_fetch_tool_allow_rule_skips_confirmation(cx: &mut TestAppContext) {
6437 init_test(cx);
6438
6439 cx.update(|cx| {
6440 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6441 settings.tool_permissions.tools.insert(
6442 FetchTool::NAME.into(),
6443 agent_settings::ToolRules {
6444 default: Some(settings::ToolPermissionMode::Confirm),
6445 always_allow: vec![agent_settings::CompiledRegex::new(r"docs\.rs", false).unwrap()],
6446 always_deny: vec![],
6447 always_confirm: vec![],
6448 invalid_patterns: vec![],
6449 },
6450 );
6451 agent_settings::AgentSettings::override_global(settings, cx);
6452 });
6453
6454 let http_client = gpui::http_client::FakeHttpClient::with_200_response();
6455
6456 #[allow(clippy::arc_with_non_send_sync)]
6457 let tool = Arc::new(crate::FetchTool::new(http_client));
6458 let (event_stream, mut rx) = crate::ToolCallEventStream::test();
6459
6460 let input: crate::FetchToolInput =
6461 serde_json::from_value(json!({"url": "https://docs.rs/some-crate"})).unwrap();
6462
6463 let _task = cx.update(|cx| tool.run(ToolInput::resolved(input), event_stream, cx));
6464
6465 cx.run_until_parked();
6466
6467 let event = rx.try_recv();
6468 assert!(
6469 !matches!(event, Ok(Ok(ThreadEvent::ToolCallAuthorization(_)))),
6470 "expected no authorization request for allowed docs.rs URL"
6471 );
6472}
6473
// End-to-end check that a follow-up message queued mid-turn makes the agent
// stop the turn with EndTurn at the next tool-call boundary, while leaving
// the queued-message flag set for the caller to drain afterwards.
#[gpui::test]
async fn test_queued_message_ends_turn_at_boundary(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Add a tool so we can simulate tool calls
    thread.update(cx, |thread, _cx| {
        thread.add_tool(EchoTool);
    });

    // Start a turn by sending a message
    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Use the echo tool"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Simulate the model making a tool call
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_1".into(),
            name: "echo".into(),
            raw_input: r#"{"text": "hello"}"#.into(),
            input: json!({"text": "hello"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::ToolUse));

    // Signal that a message is queued before ending the stream
    thread.update(cx, |thread, _cx| {
        thread.set_has_queued_message(true);
    });

    // Now end the stream - tool will run, and the boundary check should see the queue
    fake_model.end_last_completion_stream();

    // Collect all events until the turn stops
    let all_events = collect_events_until_stop(&mut events, cx).await;

    // Verify we received the tool call event
    let tool_call_ids: Vec<_> = all_events
        .iter()
        .filter_map(|e| match e {
            Ok(ThreadEvent::ToolCall(tc)) => Some(tc.tool_call_id.to_string()),
            _ => None,
        })
        .collect();
    assert_eq!(
        tool_call_ids,
        vec!["tool_1"],
        "Should have received a tool call event for our echo tool"
    );

    // The turn should have stopped with EndTurn
    let stop_reasons = stop_events(all_events);
    assert_eq!(
        stop_reasons,
        vec![acp::StopReason::EndTurn],
        "Turn should have ended after tool completion due to queued message"
    );

    // Verify the queued message flag is still set
    thread.update(cx, |thread, _cx| {
        assert!(
            thread.has_queued_message(),
            "Should still have queued message flag set"
        );
    });

    // Thread should be idle now
    thread.update(cx, |thread, _cx| {
        assert!(
            thread.is_turn_complete(),
            "Thread should not be running after turn ends"
        );
    });
}
6558
// When a tool fails while its input is still streaming (is_input_complete:
// false), the thread should make the follow-up request carrying the failed
// tool result right away, without waiting for the completion stream to end.
#[gpui::test]
async fn test_streaming_tool_error_breaks_stream_loop_immediately(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // This tool fails after receiving a single input chunk.
    thread.update(cx, |thread, _cx| {
        thread.add_tool(StreamingFailingEchoTool {
            receive_chunks_until_failure: 1,
        });
    });

    let _events = thread
        .update(cx, |thread, cx| {
            thread.send(
                UserMessageId::new(),
                ["Use the streaming_failing_echo tool"],
                cx,
            )
        })
        .unwrap();
    cx.run_until_parked();

    // The input is deliberately left incomplete so the failure happens mid-stream.
    let tool_use = LanguageModelToolUse {
        id: "call_1".into(),
        name: StreamingFailingEchoTool::NAME.into(),
        raw_input: "hello".into(),
        input: json!({}),
        is_input_complete: false,
        thought_signature: None,
    };

    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone()));

    cx.run_until_parked();

    // Note: end_last_completion_stream() is never called — the next request
    // must already be pending purely because the tool errored.
    let completions = fake_model.pending_completions();
    let last_completion = completions.last().unwrap();

    // messages[0] (presumably the system/context message — not asserted here)
    // is skipped; verify the rest: user message, assistant tool use, and the
    // is_error tool result echoed back to the model.
    assert_eq!(
        last_completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Use the streaming_failing_echo tool".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![language_model::MessageContent::ToolUse(tool_use.clone())],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![language_model::MessageContent::ToolResult(
                    LanguageModelToolResult {
                        tool_use_id: tool_use.id.clone(),
                        tool_name: tool_use.name,
                        is_error: true,
                        content: "failed".into(),
                        output: Some("failed".into()),
                    }
                )],
                cache: true,
                reasoning_details: None,
            },
        ]
    );
}
6633
// When one streaming tool fails while a previously-started tool call from the
// same turn is still running, the follow-up request should only go out after
// the other tool finishes, and should carry both tool results.
#[gpui::test]
async fn test_streaming_tool_error_waits_for_prior_tools_to_complete(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Channel used to hold the first tool open until the test releases it.
    let (complete_streaming_echo_tool_call_tx, complete_streaming_echo_tool_call_rx) =
        oneshot::channel();

    thread.update(cx, |thread, _cx| {
        thread.add_tool(
            StreamingEchoTool::new().with_wait_until_complete(complete_streaming_echo_tool_call_rx),
        );
        // Second tool fails after a single input chunk.
        thread.add_tool(StreamingFailingEchoTool {
            receive_chunks_until_failure: 1,
        });
    });

    let _events = thread
        .update(cx, |thread, cx| {
            thread.send(
                UserMessageId::new(),
                ["Use the streaming_echo tool and the streaming_failing_echo tool"],
                cx,
            )
        })
        .unwrap();
    cx.run_until_parked();

    // First tool call: an initial partial-input chunk...
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "call_1".into(),
            name: StreamingEchoTool::NAME.into(),
            raw_input: "hello".into(),
            input: json!({ "text": "hello" }),
            is_input_complete: false,
            thought_signature: None,
        },
    ));
    // ...followed by the complete input for the same id.
    let first_tool_use = LanguageModelToolUse {
        id: "call_1".into(),
        name: StreamingEchoTool::NAME.into(),
        raw_input: "hello world".into(),
        input: json!({ "text": "hello world" }),
        is_input_complete: true,
        thought_signature: None,
    };
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        first_tool_use.clone(),
    ));
    // Second tool call: input left incomplete so the tool fails mid-stream
    // while the first tool is still blocked on the oneshot channel.
    let second_tool_use = LanguageModelToolUse {
        name: StreamingFailingEchoTool::NAME.into(),
        raw_input: "hello".into(),
        input: json!({ "text": "hello" }),
        is_input_complete: false,
        thought_signature: None,
        id: "call_2".into(),
    };
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        second_tool_use.clone(),
    ));

    cx.run_until_parked();

    // Release the first tool so the turn can proceed to the next request.
    complete_streaming_echo_tool_call_tx.send(()).unwrap();

    cx.run_until_parked();

    let completions = fake_model.pending_completions();
    let last_completion = completions.last().unwrap();

    // messages[0] (presumably the system/context message — not asserted here)
    // is skipped; the follow-up must contain both tool uses and both results —
    // the failed one ("call_2") and the successful one ("call_1").
    assert_eq!(
        last_completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![
                    "Use the streaming_echo tool and the streaming_failing_echo tool".into()
                ],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![
                    language_model::MessageContent::ToolUse(first_tool_use.clone()),
                    language_model::MessageContent::ToolUse(second_tool_use.clone())
                ],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![
                    language_model::MessageContent::ToolResult(LanguageModelToolResult {
                        tool_use_id: second_tool_use.id.clone(),
                        tool_name: second_tool_use.name,
                        is_error: true,
                        content: "failed".into(),
                        output: Some("failed".into()),
                    }),
                    language_model::MessageContent::ToolResult(LanguageModelToolResult {
                        tool_use_id: first_tool_use.id.clone(),
                        tool_name: first_tool_use.name,
                        is_error: false,
                        content: "hello world".into(),
                        output: Some("hello world".into()),
                    }),
                ],
                cache: true,
                reasoning_details: None,
            },
        ]
    );
}
6751
6752#[gpui::test]
6753async fn test_mid_turn_model_and_settings_refresh(cx: &mut TestAppContext) {
6754 let ThreadTest {
6755 model, thread, fs, ..
6756 } = setup(cx, TestModel::Fake).await;
6757 let fake_model_a = model.as_fake();
6758
6759 thread.update(cx, |thread, _cx| {
6760 thread.add_tool(EchoTool);
6761 thread.add_tool(DelayTool);
6762 });
6763
6764 // Set up two profiles: profile-a has both tools, profile-b has only DelayTool.
6765 fs.insert_file(
6766 paths::settings_file(),
6767 json!({
6768 "agent": {
6769 "profiles": {
6770 "profile-a": {
6771 "name": "Profile A",
6772 "tools": {
6773 EchoTool::NAME: true,
6774 DelayTool::NAME: true,
6775 }
6776 },
6777 "profile-b": {
6778 "name": "Profile B",
6779 "tools": {
6780 DelayTool::NAME: true,
6781 }
6782 }
6783 }
6784 }
6785 })
6786 .to_string()
6787 .into_bytes(),
6788 )
6789 .await;
6790 cx.run_until_parked();
6791
6792 thread.update(cx, |thread, cx| {
6793 thread.set_profile(AgentProfileId("profile-a".into()), cx);
6794 thread.set_thinking_enabled(false, cx);
6795 });
6796
6797 // Send a message — first iteration starts with model A, profile-a, thinking off.
6798 thread
6799 .update(cx, |thread, cx| {
6800 thread.send(UserMessageId::new(), ["test mid-turn refresh"], cx)
6801 })
6802 .unwrap();
6803 cx.run_until_parked();
6804
6805 // Verify first request has both tools and thinking disabled.
6806 let completions = fake_model_a.pending_completions();
6807 assert_eq!(completions.len(), 1);
6808 let first_tools = tool_names_for_completion(&completions[0]);
6809 assert_eq!(first_tools, vec![DelayTool::NAME, EchoTool::NAME]);
6810 assert!(!completions[0].thinking_allowed);
6811
6812 // Model A responds with an echo tool call.
6813 fake_model_a.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
6814 LanguageModelToolUse {
6815 id: "tool_1".into(),
6816 name: "echo".into(),
6817 raw_input: r#"{"text":"hello"}"#.into(),
6818 input: json!({"text": "hello"}),
6819 is_input_complete: true,
6820 thought_signature: None,
6821 },
6822 ));
6823 fake_model_a.end_last_completion_stream();
6824
6825 // Before the next iteration runs, switch to profile-b (only DelayTool),
6826 // swap in a new model, and enable thinking.
6827 let fake_model_b = Arc::new(FakeLanguageModel::with_id_and_thinking(
6828 "test-provider",
6829 "model-b",
6830 "Model B",
6831 true,
6832 ));
6833 thread.update(cx, |thread, cx| {
6834 thread.set_profile(AgentProfileId("profile-b".into()), cx);
6835 thread.set_model(fake_model_b.clone() as Arc<dyn LanguageModel>, cx);
6836 thread.set_thinking_enabled(true, cx);
6837 });
6838
6839 // Run until parked — processes the echo tool call, loops back, picks up
6840 // the new model/profile/thinking, and makes a second request to model B.
6841 cx.run_until_parked();
6842
6843 // The second request should have gone to model B.
6844 let model_b_completions = fake_model_b.pending_completions();
6845 assert_eq!(
6846 model_b_completions.len(),
6847 1,
6848 "second request should go to model B"
6849 );
6850
6851 // Profile-b only has DelayTool, so echo should be gone.
6852 let second_tools = tool_names_for_completion(&model_b_completions[0]);
6853 assert_eq!(second_tools, vec![DelayTool::NAME]);
6854
6855 // Thinking should now be enabled.
6856 assert!(model_b_completions[0].thinking_allowed);
6857}