1use super::*;
2use acp_thread::{
3 AgentConnection, AgentModelGroupName, AgentModelList, PermissionOptions, ThreadStatus,
4 UserMessageId,
5};
6use agent_client_protocol::{self as acp};
7use agent_settings::AgentProfileId;
8use anyhow::Result;
9use client::{Client, UserStore};
10use collections::IndexMap;
11use context_server::{ContextServer, ContextServerCommand, ContextServerId};
12use feature_flags::FeatureFlagAppExt as _;
13use fs::{FakeFs, Fs};
14use futures::{
15 FutureExt as _, StreamExt,
16 channel::{
17 mpsc::{self, UnboundedReceiver},
18 oneshot,
19 },
20 future::{Fuse, Shared},
21};
22use gpui::{
23 App, AppContext, AsyncApp, Entity, Task, TestAppContext, UpdateGlobal,
24 http_client::FakeHttpClient,
25};
26use indoc::indoc;
27use language_model::{
28 CompletionIntent, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
29 LanguageModelId, LanguageModelProviderName, LanguageModelRegistry, LanguageModelRequest,
30 LanguageModelRequestMessage, LanguageModelToolResult, LanguageModelToolSchemaFormat,
31 LanguageModelToolUse, MessageContent, Role, StopReason, TokenUsage,
32 fake_provider::FakeLanguageModel,
33};
34use pretty_assertions::assert_eq;
35use project::{
36 Project, context_server_store::ContextServerStore, project_settings::ProjectSettings,
37};
38use prompt_store::ProjectContext;
39use reqwest_client::ReqwestClient;
40use schemars::JsonSchema;
41use serde::{Deserialize, Serialize};
42use serde_json::json;
43use settings::{Settings, SettingsStore};
44use std::{
45 path::Path,
46 pin::Pin,
47 rc::Rc,
48 sync::{
49 Arc,
50 atomic::{AtomicBool, AtomicUsize, Ordering},
51 },
52 time::Duration,
53};
54use util::path;
55
56mod edit_file_thread_test;
57mod test_tools;
58use test_tools::*;
59
60pub(crate) fn init_test(cx: &mut TestAppContext) {
61 cx.update(|cx| {
62 let settings_store = SettingsStore::test(cx);
63 cx.set_global(settings_store);
64 });
65}
66
/// Test double for a terminal spawned by the terminal tool.
pub(crate) struct FakeTerminalHandle {
    // Flipped to true once `kill` is called; inspected via `was_killed`.
    killed: Arc<AtomicBool>,
    // Simulates the user manually stopping the terminal; set via `set_stopped_by_user`.
    stopped_by_user: Arc<AtomicBool>,
    // One-shot trigger that resolves `wait_for_exit`; consumed by `signal_exit`/`kill`.
    exit_sender: std::cell::RefCell<Option<futures::channel::oneshot::Sender<()>>>,
    // Shared future that completes with the terminal's exit status.
    wait_for_exit: Shared<Task<acp::TerminalExitStatus>>,
    // Canned output returned from `current_output`.
    output: acp::TerminalOutputResponse,
    id: acp::TerminalId,
}
75
76impl FakeTerminalHandle {
77 pub(crate) fn new_never_exits(cx: &mut App) -> Self {
78 let killed = Arc::new(AtomicBool::new(false));
79 let stopped_by_user = Arc::new(AtomicBool::new(false));
80
81 let (exit_sender, exit_receiver) = futures::channel::oneshot::channel();
82
83 let wait_for_exit = cx
84 .spawn(async move |_cx| {
85 // Wait for the exit signal (sent when kill() is called)
86 let _ = exit_receiver.await;
87 acp::TerminalExitStatus::new()
88 })
89 .shared();
90
91 Self {
92 killed,
93 stopped_by_user,
94 exit_sender: std::cell::RefCell::new(Some(exit_sender)),
95 wait_for_exit,
96 output: acp::TerminalOutputResponse::new("partial output".to_string(), false),
97 id: acp::TerminalId::new("fake_terminal".to_string()),
98 }
99 }
100
101 pub(crate) fn new_with_immediate_exit(cx: &mut App, exit_code: u32) -> Self {
102 let killed = Arc::new(AtomicBool::new(false));
103 let stopped_by_user = Arc::new(AtomicBool::new(false));
104 let (exit_sender, _exit_receiver) = futures::channel::oneshot::channel();
105
106 let wait_for_exit = cx
107 .spawn(async move |_cx| acp::TerminalExitStatus::new().exit_code(exit_code))
108 .shared();
109
110 Self {
111 killed,
112 stopped_by_user,
113 exit_sender: std::cell::RefCell::new(Some(exit_sender)),
114 wait_for_exit,
115 output: acp::TerminalOutputResponse::new("command output".to_string(), false),
116 id: acp::TerminalId::new("fake_terminal".to_string()),
117 }
118 }
119
120 pub(crate) fn was_killed(&self) -> bool {
121 self.killed.load(Ordering::SeqCst)
122 }
123
124 pub(crate) fn set_stopped_by_user(&self, stopped: bool) {
125 self.stopped_by_user.store(stopped, Ordering::SeqCst);
126 }
127
128 pub(crate) fn signal_exit(&self) {
129 if let Some(sender) = self.exit_sender.borrow_mut().take() {
130 let _ = sender.send(());
131 }
132 }
133}
134
135impl crate::TerminalHandle for FakeTerminalHandle {
136 fn id(&self, _cx: &AsyncApp) -> Result<acp::TerminalId> {
137 Ok(self.id.clone())
138 }
139
140 fn current_output(&self, _cx: &AsyncApp) -> Result<acp::TerminalOutputResponse> {
141 Ok(self.output.clone())
142 }
143
144 fn wait_for_exit(&self, _cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>> {
145 Ok(self.wait_for_exit.clone())
146 }
147
148 fn kill(&self, _cx: &AsyncApp) -> Result<()> {
149 self.killed.store(true, Ordering::SeqCst);
150 self.signal_exit();
151 Ok(())
152 }
153
154 fn was_stopped_by_user(&self, _cx: &AsyncApp) -> Result<bool> {
155 Ok(self.stopped_by_user.load(Ordering::SeqCst))
156 }
157}
158
/// Test double for a subagent: replies to every `send` with one shared,
/// pre-arranged response task.
struct FakeSubagentHandle {
    session_id: acp::SessionId,
    // Shared task whose output is returned from every `send` call.
    send_task: Shared<Task<String>>,
}
163
164impl SubagentHandle for FakeSubagentHandle {
165 fn id(&self) -> acp::SessionId {
166 self.session_id.clone()
167 }
168
169 fn num_entries(&self, _cx: &App) -> usize {
170 unimplemented!()
171 }
172
173 fn send(&self, _message: String, cx: &AsyncApp) -> Task<Result<String>> {
174 let task = self.send_task.clone();
175 cx.background_spawn(async move { Ok(task.await) })
176 }
177}
178
/// Test environment providing (at most) one pre-configured terminal and
/// subagent handle, while counting terminal creations.
#[derive(Default)]
pub(crate) struct FakeThreadEnvironment {
    // Handle returned by `create_terminal`; panics there if unset.
    terminal_handle: Option<Rc<FakeTerminalHandle>>,
    // Handle returned by `create_subagent`; panics there if unset.
    subagent_handle: Option<Rc<FakeSubagentHandle>>,
    // Incremented on every `create_terminal` call.
    terminal_creations: Arc<AtomicUsize>,
}
185
186impl FakeThreadEnvironment {
187 pub(crate) fn with_terminal(self, terminal_handle: FakeTerminalHandle) -> Self {
188 Self {
189 terminal_handle: Some(terminal_handle.into()),
190 ..self
191 }
192 }
193
194 pub(crate) fn terminal_creation_count(&self) -> usize {
195 self.terminal_creations.load(Ordering::SeqCst)
196 }
197}
198
199impl crate::ThreadEnvironment for FakeThreadEnvironment {
200 fn create_terminal(
201 &self,
202 _command: String,
203 _cwd: Option<std::path::PathBuf>,
204 _output_byte_limit: Option<u64>,
205 _cx: &mut AsyncApp,
206 ) -> Task<Result<Rc<dyn crate::TerminalHandle>>> {
207 self.terminal_creations.fetch_add(1, Ordering::SeqCst);
208 let handle = self
209 .terminal_handle
210 .clone()
211 .expect("Terminal handle not available on FakeThreadEnvironment");
212 Task::ready(Ok(handle as Rc<dyn crate::TerminalHandle>))
213 }
214
215 fn create_subagent(&self, _label: String, _cx: &mut App) -> Result<Rc<dyn SubagentHandle>> {
216 Ok(self
217 .subagent_handle
218 .clone()
219 .expect("Subagent handle not available on FakeThreadEnvironment")
220 as Rc<dyn SubagentHandle>)
221 }
222}
223
/// Environment that creates multiple independent terminal handles for testing concurrent terminals.
struct MultiTerminalEnvironment {
    // Every handle produced by `create_terminal`, in creation order.
    handles: std::cell::RefCell<Vec<Rc<FakeTerminalHandle>>>,
}
228
229impl MultiTerminalEnvironment {
230 fn new() -> Self {
231 Self {
232 handles: std::cell::RefCell::new(Vec::new()),
233 }
234 }
235
236 fn handles(&self) -> Vec<Rc<FakeTerminalHandle>> {
237 self.handles.borrow().clone()
238 }
239}
240
241impl crate::ThreadEnvironment for MultiTerminalEnvironment {
242 fn create_terminal(
243 &self,
244 _command: String,
245 _cwd: Option<std::path::PathBuf>,
246 _output_byte_limit: Option<u64>,
247 cx: &mut AsyncApp,
248 ) -> Task<Result<Rc<dyn crate::TerminalHandle>>> {
249 let handle = Rc::new(cx.update(|cx| FakeTerminalHandle::new_never_exits(cx)));
250 self.handles.borrow_mut().push(handle.clone());
251 Task::ready(Ok(handle as Rc<dyn crate::TerminalHandle>))
252 }
253
254 fn create_subagent(&self, _label: String, _cx: &mut App) -> Result<Rc<dyn SubagentHandle>> {
255 unimplemented!()
256 }
257}
258
259fn always_allow_tools(cx: &mut TestAppContext) {
260 cx.update(|cx| {
261 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
262 settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
263 agent_settings::AgentSettings::override_global(settings, cx);
264 });
265}
266
#[gpui::test]
async fn test_echo(cx: &mut TestAppContext) {
    // Streams a single text chunk from the fake model and verifies it lands on
    // the thread as an assistant message, with the turn ending in `EndTurn`.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Testing: Reply with 'Hello'"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    // Drive the fake completion stream: one text chunk, then a normal stop.
    fake_model.send_last_completion_stream_text_chunk("Hello");
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    fake_model.end_last_completion_stream();

    let events = events.collect().await;
    thread.update(cx, |thread, _cx| {
        assert_eq!(
            thread.last_received_or_pending_message().unwrap().role(),
            Role::Assistant
        );
        assert_eq!(
            thread
                .last_received_or_pending_message()
                .unwrap()
                .to_markdown(),
            "Hello\n"
        )
    });
    assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
}
299
#[gpui::test]
async fn test_terminal_tool_timeout_kills_handle(cx: &mut TestAppContext) {
    // Runs the terminal tool with a tiny timeout against a terminal that never
    // exits on its own, and verifies the tool kills the handle and still
    // reports the partial output it had collected.
    init_test(cx);
    always_allow_tools(cx);

    let fs = FakeFs::new(cx.executor());
    let project = Project::test(fs, [], cx).await;

    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::TerminalTool::new(project, environment));
    let (event_stream, mut rx) = crate::ToolCallEventStream::test();

    // 5ms timeout: far shorter than the (never-arriving) natural exit.
    let task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::TerminalToolInput {
                command: "sleep 1000".to_string(),
                cd: ".".to_string(),
                timeout_ms: Some(5),
            }),
            event_stream,
            cx,
        )
    });

    let update = rx.expect_update_fields().await;
    assert!(
        update.content.iter().any(|blocks| {
            blocks
                .iter()
                .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
        }),
        "expected tool call update to include terminal content"
    );

    let mut task_future: Pin<Box<Fuse<Task<Result<String, String>>>>> = Box::pin(task.fuse());

    // Poll the tool task until the timeout fires, bounded by a wall-clock
    // deadline so a regression can't hang the test forever.
    let deadline = std::time::Instant::now() + Duration::from_millis(500);
    loop {
        if let Some(result) = task_future.as_mut().now_or_never() {
            let result = result.expect("terminal tool task should complete");

            assert!(
                handle.was_killed(),
                "expected terminal handle to be killed on timeout"
            );
            assert!(
                result.contains("partial output"),
                "expected result to include terminal output, got: {result}"
            );
            return;
        }

        if std::time::Instant::now() >= deadline {
            panic!("timed out waiting for terminal tool task to complete");
        }

        // Let pending foreground work and timers advance between polls.
        cx.run_until_parked();
        cx.background_executor.timer(Duration::from_millis(1)).await;
    }
}
365
#[gpui::test]
#[ignore]
async fn test_terminal_tool_without_timeout_does_not_kill_handle(cx: &mut TestAppContext) {
    // Counterpart to the timeout test: with `timeout_ms: None`, the tool must
    // leave a long-running terminal alone rather than killing it.
    init_test(cx);
    always_allow_tools(cx);

    let fs = FakeFs::new(cx.executor());
    let project = Project::test(fs, [], cx).await;

    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::TerminalTool::new(project, environment));
    let (event_stream, mut rx) = crate::ToolCallEventStream::test();

    // The task is deliberately kept alive but never awaited to completion:
    // the command would run "forever".
    let _task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::TerminalToolInput {
                command: "sleep 1000".to_string(),
                cd: ".".to_string(),
                timeout_ms: None,
            }),
            event_stream,
            cx,
        )
    });

    let update = rx.expect_update_fields().await;
    assert!(
        update.content.iter().any(|blocks| {
            blocks
                .iter()
                .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
        }),
        "expected tool call update to include terminal content"
    );

    // Give any (buggy) kill logic a window to fire before asserting.
    cx.background_executor
        .timer(Duration::from_millis(25))
        .await;

    assert!(
        !handle.was_killed(),
        "did not expect terminal handle to be killed without a timeout"
    );
}
415
#[gpui::test]
async fn test_thinking(cx: &mut TestAppContext) {
    // Verifies that a thinking chunk followed by answer text renders as a
    // `<think>` section preceding the answer in the message markdown.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let events = thread
        .update(cx, |thread, cx| {
            thread.send(
                UserMessageId::new(),
                [indoc! {"
                    Testing:

                    Generate a thinking step where you just think the word 'Think',
                    and have your final answer be 'Hello'
                "}],
                cx,
            )
        })
        .unwrap();
    cx.run_until_parked();
    // Emit the thinking event first, then the visible answer text.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Thinking {
        text: "Think".to_string(),
        signature: None,
    });
    fake_model.send_last_completion_stream_text_chunk("Hello");
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    fake_model.end_last_completion_stream();

    let events = events.collect().await;
    thread.update(cx, |thread, _cx| {
        assert_eq!(
            thread.last_received_or_pending_message().unwrap().role(),
            Role::Assistant
        );
        assert_eq!(
            thread
                .last_received_or_pending_message()
                .unwrap()
                .to_markdown(),
            indoc! {"
                <think>Think</think>
                Hello
            "}
        )
    });
    assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
}
464
#[gpui::test]
async fn test_system_prompt(cx: &mut TestAppContext) {
    // With a tool registered, the first request's system message should embed
    // project context (the shell) and the diagnostics guidance section.
    let ThreadTest {
        model,
        thread,
        project_context,
        ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Plant a recognizable value in the project context so we can spot it in
    // the rendered system prompt.
    project_context.update(cx, |project_context, _cx| {
        project_context.shell = "test-shell".into()
    });
    thread.update(cx, |thread, _| thread.add_tool(EchoTool));
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["abc"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    let mut pending_completions = fake_model.pending_completions();
    assert_eq!(
        pending_completions.len(),
        1,
        "unexpected pending completions: {:?}",
        pending_completions
    );

    let pending_completion = pending_completions.pop().unwrap();
    assert_eq!(pending_completion.messages[0].role, Role::System);

    let system_message = &pending_completion.messages[0];
    let system_prompt = system_message.content[0].to_str().unwrap();
    assert!(
        system_prompt.contains("test-shell"),
        "unexpected system message: {:?}",
        system_message
    );
    // Tool-specific guidance is only included when tools are registered.
    assert!(
        system_prompt.contains("## Fixing Diagnostics"),
        "unexpected system message: {:?}",
        system_message
    );
}
509
#[gpui::test]
async fn test_system_prompt_without_tools(cx: &mut TestAppContext) {
    // Mirror of `test_system_prompt`: with no tools registered, the tool-use
    // and diagnostics sections must be absent from the system message.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["abc"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    let mut pending_completions = fake_model.pending_completions();
    assert_eq!(
        pending_completions.len(),
        1,
        "unexpected pending completions: {:?}",
        pending_completions
    );

    let pending_completion = pending_completions.pop().unwrap();
    assert_eq!(pending_completion.messages[0].role, Role::System);

    let system_message = &pending_completion.messages[0];
    let system_prompt = system_message.content[0].to_str().unwrap();
    assert!(
        !system_prompt.contains("## Tool Use"),
        "unexpected system message: {:?}",
        system_message
    );
    assert!(
        !system_prompt.contains("## Fixing Diagnostics"),
        "unexpected system message: {:?}",
        system_message
    );
}
545
#[gpui::test]
async fn test_prompt_caching(cx: &mut TestAppContext) {
    // Verifies the cache-marking policy: on every request, exactly the most
    // recent message (latest user message, or latest tool result) carries
    // `cache: true`; all earlier messages are sent uncached.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Send initial user message and verify it's cached
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Message 1"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // messages[0] is the system prompt, hence the [1..] slices throughout.
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        completion.messages[1..],
        vec![LanguageModelRequestMessage {
            role: Role::User,
            content: vec!["Message 1".into()],
            cache: true,
            reasoning_details: None,
        }]
    );
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Text(
        "Response to Message 1".into(),
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Send another user message and verify only the latest is cached
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Message 2"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec!["Response to Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 2".into()],
                cache: true,
                reasoning_details: None,
            }
        ]
    );
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Text(
        "Response to Message 2".into(),
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Simulate a tool call and verify that the latest tool result is cached
    thread.update(cx, |thread, _| thread.add_tool(EchoTool));
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Use the echo tool"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    let tool_use = LanguageModelToolUse {
        id: "tool_1".into(),
        name: EchoTool::NAME.into(),
        raw_input: json!({"text": "test"}).to_string(),
        input: json!({"text": "test"}),
        is_input_complete: true,
        thought_signature: None,
    };
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone()));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // The tool runs automatically; the follow-up request should end with the
    // tool result as the only cached message.
    let completion = fake_model.pending_completions().pop().unwrap();
    let tool_result = LanguageModelToolResult {
        tool_use_id: "tool_1".into(),
        tool_name: EchoTool::NAME.into(),
        is_error: false,
        content: "test".into(),
        output: Some("test".into()),
    };
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec!["Response to Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 2".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec!["Response to Message 2".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Use the echo tool".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![MessageContent::ToolUse(tool_use)],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::ToolResult(tool_result)],
                cache: true,
                reasoning_details: None,
            }
        ]
    );
}
691
#[gpui::test]
#[cfg_attr(not(feature = "e2e"), ignore)]
async fn test_basic_tool_calls(cx: &mut TestAppContext) {
    // End-to-end (real model) test: exercises tool calls that finish both
    // before and after the model's streaming stops.
    let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;

    // Test a tool call that's likely to complete *before* streaming stops.
    let events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(EchoTool);
            thread.send(
                UserMessageId::new(),
                ["Now test the echo tool with 'Hello'. Does it work? Say 'Yes' or 'No'."],
                cx,
            )
        })
        .unwrap()
        .collect()
        .await;
    assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);

    // Test a tool call that's likely to complete *after* streaming stops.
    let events = thread
        .update(cx, |thread, cx| {
            thread.remove_tool(&EchoTool::NAME);
            thread.add_tool(DelayTool);
            thread.send(
                UserMessageId::new(),
                [
                    "Now call the delay tool with 200ms.",
                    "When the timer goes off, then you echo the output of the tool.",
                ],
                cx,
            )
        })
        .unwrap()
        .collect()
        .await;
    assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
    thread.update(cx, |thread, _cx| {
        // The delay tool's output ("Ding") should be echoed back in the final
        // assistant message.
        assert!(
            thread
                .last_received_or_pending_message()
                .unwrap()
                .as_agent_message()
                .unwrap()
                .content
                .iter()
                .any(|content| {
                    if let AgentMessageContent::Text(text) = content {
                        text.contains("Ding")
                    } else {
                        false
                    }
                }),
            "{}",
            thread.to_markdown()
        );
    });
}
751
#[gpui::test]
#[cfg_attr(not(feature = "e2e"), ignore)]
async fn test_streaming_tool_calls(cx: &mut TestAppContext) {
    // End-to-end (real model) test: verifies tool-call input streams
    // incrementally, i.e. we observe at least one partially-streamed tool use
    // before the input is complete.
    let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(WordListTool);
            thread.send(UserMessageId::new(), ["Test the word_list tool."], cx)
        })
        .unwrap();

    let mut saw_partial_tool_use = false;
    while let Some(event) = events.next().await {
        if let Ok(ThreadEvent::ToolCall(tool_call)) = event {
            thread.update(cx, |thread, _cx| {
                // Look for a tool use in the thread's last message
                let message = thread.last_received_or_pending_message().unwrap();
                let agent_message = message.as_agent_message().unwrap();
                let last_content = agent_message.content.last().unwrap();
                if let AgentMessageContent::ToolUse(last_tool_use) = last_content {
                    assert_eq!(last_tool_use.name.as_ref(), "word_list");
                    if tool_call.status == acp::ToolCallStatus::Pending {
                        // While pending, input may still be mid-stream: key "g"
                        // (streamed last) being absent marks a partial tool use.
                        if !last_tool_use.is_input_complete
                            && last_tool_use.input.get("g").is_none()
                        {
                            saw_partial_tool_use = true;
                        }
                    } else {
                        last_tool_use
                            .input
                            .get("a")
                            .expect("'a' has streamed because input is now complete");
                        last_tool_use
                            .input
                            .get("g")
                            .expect("'g' has streamed because input is now complete");
                    }
                } else {
                    panic!("last content should be a tool use");
                }
            });
        }
    }

    assert!(
        saw_partial_tool_use,
        "should see at least one partially streamed tool use in the history"
    );
}
803
#[gpui::test]
async fn test_tool_authorization(cx: &mut TestAppContext) {
    // Covers the tool-permission flow: approving once, denying once, choosing
    // "always allow", and confirming the always-allow choice suppresses
    // further authorization prompts for the same tool.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(ToolRequiringPermission);
            thread.send(UserMessageId::new(), ["abc"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    // Two tool uses in one turn so we can approve one and reject the other.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_1".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_2".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    let tool_call_auth_1 = next_tool_call_authorization(&mut events).await;
    let tool_call_auth_2 = next_tool_call_authorization(&mut events).await;

    // Approve the first - send "allow" option_id (UI transforms "once" to "allow")
    tool_call_auth_1
        .response
        .send(acp_thread::SelectedPermissionOutcome::new(
            acp::PermissionOptionId::new("allow"),
            acp::PermissionOptionKind::AllowOnce,
        ))
        .unwrap();
    cx.run_until_parked();

    // Reject the second - send "deny" option_id directly since Deny is now a button
    tool_call_auth_2
        .response
        .send(acp_thread::SelectedPermissionOutcome::new(
            acp::PermissionOptionId::new("deny"),
            acp::PermissionOptionKind::RejectOnce,
        ))
        .unwrap();
    cx.run_until_parked();

    // The follow-up request should report one success and one error result.
    let completion = fake_model.pending_completions().pop().unwrap();
    let message = completion.messages.last().unwrap();
    assert_eq!(
        message.content,
        vec![
            language_model::MessageContent::ToolResult(LanguageModelToolResult {
                tool_use_id: tool_call_auth_1.tool_call.tool_call_id.0.to_string().into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: false,
                content: "Allowed".into(),
                output: Some("Allowed".into())
            }),
            language_model::MessageContent::ToolResult(LanguageModelToolResult {
                tool_use_id: tool_call_auth_2.tool_call.tool_call_id.0.to_string().into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: true,
                content: "Permission to run tool denied by user".into(),
                output: Some("Permission to run tool denied by user".into())
            })
        ]
    );

    // Simulate yet another tool call.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_3".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Respond by always allowing tools - send transformed option_id
    // (UI transforms "always:tool_requiring_permission" to "always_allow:tool_requiring_permission")
    let tool_call_auth_3 = next_tool_call_authorization(&mut events).await;
    tool_call_auth_3
        .response
        .send(acp_thread::SelectedPermissionOutcome::new(
            acp::PermissionOptionId::new("always_allow:tool_requiring_permission"),
            acp::PermissionOptionKind::AllowAlways,
        ))
        .unwrap();
    cx.run_until_parked();
    let completion = fake_model.pending_completions().pop().unwrap();
    let message = completion.messages.last().unwrap();
    assert_eq!(
        message.content,
        vec![language_model::MessageContent::ToolResult(
            LanguageModelToolResult {
                tool_use_id: tool_call_auth_3.tool_call.tool_call_id.0.to_string().into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: false,
                content: "Allowed".into(),
                output: Some("Allowed".into())
            }
        )]
    );

    // Simulate a final tool call, ensuring we don't trigger authorization.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_4".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    let completion = fake_model.pending_completions().pop().unwrap();
    let message = completion.messages.last().unwrap();
    assert_eq!(
        message.content,
        vec![language_model::MessageContent::ToolResult(
            LanguageModelToolResult {
                tool_use_id: "tool_id_4".into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: false,
                content: "Allowed".into(),
                output: Some("Allowed".into())
            }
        )]
    );
}
949
#[gpui::test]
async fn test_tool_hallucination(cx: &mut TestAppContext) {
    // When the model invokes a tool that was never registered, the tool call
    // should surface and then transition to `Failed` rather than crash.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["abc"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_1".into(),
            name: "nonexistent_tool".into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    let tool_call = expect_tool_call(&mut events).await;
    assert_eq!(tool_call.title, "nonexistent_tool");
    assert_eq!(tool_call.status, acp::ToolCallStatus::Pending);
    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(update.fields.status, Some(acp::ToolCallStatus::Failed));
}
979
980async fn expect_tool_call(events: &mut UnboundedReceiver<Result<ThreadEvent>>) -> acp::ToolCall {
981 let event = events
982 .next()
983 .await
984 .expect("no tool call authorization event received")
985 .unwrap();
986 match event {
987 ThreadEvent::ToolCall(tool_call) => tool_call,
988 event => {
989 panic!("Unexpected event {event:?}");
990 }
991 }
992}
993
994async fn expect_tool_call_update_fields(
995 events: &mut UnboundedReceiver<Result<ThreadEvent>>,
996) -> acp::ToolCallUpdate {
997 let event = events
998 .next()
999 .await
1000 .expect("no tool call authorization event received")
1001 .unwrap();
1002 match event {
1003 ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(update)) => update,
1004 event => {
1005 panic!("Unexpected event {event:?}");
1006 }
1007 }
1008}
1009
1010async fn expect_plan(events: &mut UnboundedReceiver<Result<ThreadEvent>>) -> acp::Plan {
1011 let event = events
1012 .next()
1013 .await
1014 .expect("no plan event received")
1015 .unwrap();
1016 match event {
1017 ThreadEvent::Plan(plan) => plan,
1018 event => {
1019 panic!("Unexpected event {event:?}");
1020 }
1021 }
1022}
1023
1024async fn next_tool_call_authorization(
1025 events: &mut UnboundedReceiver<Result<ThreadEvent>>,
1026) -> ToolCallAuthorization {
1027 loop {
1028 let event = events
1029 .next()
1030 .await
1031 .expect("no tool call authorization event received")
1032 .unwrap();
1033 if let ThreadEvent::ToolCallAuthorization(tool_call_authorization) = event {
1034 let permission_kinds = tool_call_authorization
1035 .options
1036 .first_option_of_kind(acp::PermissionOptionKind::AllowAlways)
1037 .map(|option| option.kind);
1038 let allow_once = tool_call_authorization
1039 .options
1040 .first_option_of_kind(acp::PermissionOptionKind::AllowOnce)
1041 .map(|option| option.kind);
1042
1043 assert_eq!(
1044 permission_kinds,
1045 Some(acp::PermissionOptionKind::AllowAlways)
1046 );
1047 assert_eq!(allow_once, Some(acp::PermissionOptionKind::AllowOnce));
1048 return tool_call_authorization;
1049 }
1050 }
1051}
1052
#[test]
fn test_permission_options_terminal_with_pattern() {
    // A multi-word cargo command should offer a per-command-pattern option.
    let options = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["cargo build --release".to_string()],
    )
    .build_permission_options();

    let choices = match options {
        PermissionOptions::Dropdown(choices) => choices,
        _ => panic!("Expected dropdown permission options"),
    };
    assert_eq!(choices.len(), 3);

    let labels = choices
        .iter()
        .map(|choice| choice.allow.name.as_ref())
        .collect::<Vec<&str>>();
    assert!(labels.contains(&"Always for terminal"));
    assert!(labels.contains(&"Always for `cargo build` commands"));
    assert!(labels.contains(&"Only this time"));
}
1074
#[test]
fn test_permission_options_terminal_command_with_flag_second_token() {
    // When the token after the executable is a flag ("-la"), only the
    // executable name ("ls") should appear in the pattern label.
    let built = ToolPermissionContext::new(TerminalTool::NAME, vec!["ls -la".to_string()])
        .build_permission_options();

    let PermissionOptions::Dropdown(choices) = built else {
        panic!("Expected dropdown permission options");
    };

    assert_eq!(choices.len(), 3);
    let mut labels: Vec<&str> = Vec::new();
    for choice in &choices {
        labels.push(choice.allow.name.as_ref());
    }
    assert!(labels.contains(&"Always for terminal"));
    assert!(labels.contains(&"Always for `ls` commands"));
    assert!(labels.contains(&"Only this time"));
}
1094
#[test]
fn test_permission_options_terminal_single_word_command() {
    // A single-token command still gets its own command-level pattern choice.
    let built = ToolPermissionContext::new(TerminalTool::NAME, vec!["whoami".to_string()])
        .build_permission_options();

    let PermissionOptions::Dropdown(choices) = built else {
        panic!("Expected dropdown permission options");
    };
    assert_eq!(choices.len(), 3);

    for expected in [
        "Always for terminal",
        "Always for `whoami` commands",
        "Only this time",
    ] {
        assert!(choices.iter().any(|choice| {
            let label: &str = choice.allow.name.as_ref();
            label == expected
        }));
    }
}
1114
#[test]
fn test_permission_options_edit_file_with_path_pattern() {
    // Editing a file inside a directory should offer an "always for this
    // directory prefix" choice alongside the tool-wide one.
    let built = ToolPermissionContext::new(EditFileTool::NAME, vec!["src/main.rs".to_string()])
        .build_permission_options();

    let PermissionOptions::Dropdown(choices) = built else {
        panic!("Expected dropdown permission options");
    };

    let has_label = |wanted: &str| {
        choices.iter().any(|choice| {
            let label: &str = choice.allow.name.as_ref();
            label == wanted
        })
    };
    assert!(has_label("Always for edit file"));
    assert!(has_label("Always for `src/`"));
}
1132
#[test]
fn test_permission_options_fetch_with_domain_pattern() {
    // Fetching a URL should offer an "always for this domain" choice
    // derived from the URL's host.
    let built = ToolPermissionContext::new(FetchTool::NAME, vec!["https://docs.rs/gpui".to_string()])
        .build_permission_options();

    let PermissionOptions::Dropdown(choices) = built else {
        panic!("Expected dropdown permission options");
    };

    for expected in ["Always for fetch", "Always for `docs.rs`"] {
        assert!(choices.iter().any(|choice| {
            let label: &str = choice.allow.name.as_ref();
            label == expected
        }));
    }
}
1150
#[test]
fn test_permission_options_without_pattern() {
    // Script invocations like "./deploy.sh" get no command-level pattern,
    // so only the tool-wide and one-time choices should appear.
    let built = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["./deploy.sh --production".to_string()],
    )
    .build_permission_options();

    let choices = match built {
        PermissionOptions::Dropdown(choices) => choices,
        _ => panic!("Expected dropdown permission options"),
    };

    assert_eq!(choices.len(), 2);
    let labels: Vec<&str> = choices
        .iter()
        .map(|choice| choice.allow.name.as_ref())
        .collect();
    assert!(labels.contains(&"Always for terminal"));
    assert!(labels.contains(&"Only this time"));
    // No "Always for `...` commands" entry should be offered.
    assert!(labels.iter().all(|label| !label.contains("commands")));
}
1172
#[test]
fn test_permission_options_symlink_target_are_flat_once_only() {
    // Symlink-escape confirmations must be one-time only: a flat
    // allow-once / reject-once pair with no "always" choices.
    let built =
        ToolPermissionContext::symlink_target(EditFileTool::NAME, vec!["/outside/file.txt".into()])
            .build_permission_options();

    let PermissionOptions::Flat(options) = built else {
        panic!("Expected flat permission options for symlink target authorization");
    };

    assert_eq!(options.len(), 2);
    let kind_of = |wanted_id: &str| {
        options
            .iter()
            .find(|option| {
                let id: &str = option.option_id.0.as_ref();
                id == wanted_id
            })
            .map(|option| option.kind)
    };
    assert_eq!(kind_of("allow"), Some(acp::PermissionOptionKind::AllowOnce));
    assert_eq!(kind_of("deny"), Some(acp::PermissionOptionKind::RejectOnce));
}
1193
#[test]
fn test_permission_option_ids_for_terminal() {
    // The first two dropdown entries share the tool-scoped option IDs and
    // differ only by sub-pattern; the last entry is the one-shot pair.
    let built = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["cargo build --release".to_string()],
    )
    .build_permission_options();

    let PermissionOptions::Dropdown(choices) = built else {
        panic!("Expected dropdown permission options");
    };

    // Expect 3 choices: always-tool, always-pattern, once
    assert_eq!(choices.len(), 3);

    // Choice 0: tool-level "always", no sub-patterns.
    let allow_id: &str = choices[0].allow.option_id.0.as_ref();
    let deny_id: &str = choices[0].deny.option_id.0.as_ref();
    assert_eq!(allow_id, "always_allow:terminal");
    assert_eq!(deny_id, "always_deny:terminal");
    assert!(choices[0].sub_patterns.is_empty());

    // Choice 1: same tool-level IDs, but scoped by a command regex.
    let allow_id: &str = choices[1].allow.option_id.0.as_ref();
    let deny_id: &str = choices[1].deny.option_id.0.as_ref();
    assert_eq!(allow_id, "always_allow:terminal");
    assert_eq!(deny_id, "always_deny:terminal");
    assert_eq!(choices[1].sub_patterns, vec!["^cargo\\s+build(\\s|$)"]);

    // Choice 2: the one-time allow/deny.
    let allow_id: &str = choices[2].allow.option_id.0.as_ref();
    let deny_id: &str = choices[2].deny.option_id.0.as_ref();
    assert_eq!(allow_id, "allow");
    assert_eq!(deny_id, "deny");
    assert!(choices[2].sub_patterns.is_empty());
}
1229
#[test]
fn test_permission_options_terminal_pipeline_produces_dropdown_with_patterns() {
    // Piped commands can't be covered by a single command pattern, so the
    // builder switches to DropdownWithPatterns with one pattern per stage.
    let built = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["cargo test 2>&1 | tail".to_string()],
    )
    .build_permission_options();

    let (choices, patterns, tool_name) = match built {
        PermissionOptions::DropdownWithPatterns {
            choices,
            patterns,
            tool_name,
        } => (choices, patterns, tool_name),
        _ => panic!("Expected DropdownWithPatterns permission options for pipeline command"),
    };

    assert_eq!(tool_name, TerminalTool::NAME);

    // Should have "Always for terminal" and "Only this time" choices
    assert_eq!(choices.len(), 2);
    for expected in ["Always for terminal", "Only this time"] {
        assert!(choices.iter().any(|choice| {
            let label: &str = choice.allow.name.as_ref();
            label == expected
        }));
    }

    // Should have per-command patterns for "cargo test" and "tail"
    assert_eq!(patterns.len(), 2);
    assert!(patterns.iter().any(|cp| cp.display_name == "cargo test"));
    assert!(patterns.iter().any(|cp| cp.display_name == "tail"));

    // Verify patterns are valid regex patterns
    assert!(
        patterns
            .iter()
            .any(|cp| cp.pattern == "^cargo\\s+test(\\s|$)")
    );
    assert!(patterns.iter().any(|cp| cp.pattern == "^tail\\b"));
}
1269
#[test]
fn test_permission_options_terminal_pipeline_with_chaining() {
    // "&&"-chained and piped stages each get their own pattern, and npm
    // subcommands are distinguished from one another.
    let built = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["npm install && npm test | tail".to_string()],
    )
    .build_permission_options();

    let PermissionOptions::DropdownWithPatterns { patterns, .. } = built else {
        panic!("Expected DropdownWithPatterns for chained pipeline command");
    };

    // With subcommand-aware patterns, "npm install" and "npm test" are distinct
    assert_eq!(patterns.len(), 3);
    for expected in ["npm install", "npm test", "tail"] {
        assert!(patterns.iter().any(|cp| cp.display_name == expected));
    }
}
1289
// E2E test (needs the "e2e" feature and live model access): asks the model to
// invoke DelayTool twice in one turn and verifies the turn completes normally
// and the final agent message reflects the tool output.
#[gpui::test]
#[cfg_attr(not(feature = "e2e"), ignore)]
async fn test_concurrent_tool_calls(cx: &mut TestAppContext) {
    let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;

    // Test concurrent tool calls with different delay times
    let events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(DelayTool);
            thread.send(
                UserMessageId::new(),
                [
                    "Call the delay tool twice in the same message.",
                    "Once with 100ms. Once with 300ms.",
                    "When both timers are complete, describe the outputs.",
                ],
                cx,
            )
        })
        .unwrap()
        // Drain the whole event stream for this turn before asserting.
        .collect()
        .await;

    let stop_reasons = stop_events(events);
    assert_eq!(stop_reasons, vec![acp::StopReason::EndTurn]);

    thread.update(cx, |thread, _cx| {
        // Concatenate all text content of the final agent message; it should
        // mention the DelayTool output ("Ding") from both invocations.
        let last_message = thread.last_received_or_pending_message().unwrap();
        let agent_message = last_message.as_agent_message().unwrap();
        let text = agent_message
            .content
            .iter()
            .filter_map(|content| {
                if let AgentMessageContent::Text(text) = content {
                    Some(text.as_str())
                } else {
                    None
                }
            })
            .collect::<String>();

        assert!(text.contains("Ding"));
    });
}
1334
// Verifies that switching agent profiles changes which tools are offered to
// the model: profile "test-1" exposes echo + delay, "test-2" only infinite.
#[gpui::test]
async fn test_profiles(cx: &mut TestAppContext) {
    let ThreadTest {
        model, thread, fs, ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Register all three native tools; the active profile decides which of
    // them are actually sent to the model.
    thread.update(cx, |thread, _cx| {
        thread.add_tool(DelayTool);
        thread.add_tool(EchoTool);
        thread.add_tool(InfiniteTool);
    });

    // Override profiles and wait for settings to be loaded.
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "profiles": {
                    "test-1": {
                        "name": "Test Profile 1",
                        "tools": {
                            EchoTool::NAME: true,
                            DelayTool::NAME: true,
                        }
                    },
                    "test-2": {
                        "name": "Test Profile 2",
                        "tools": {
                            InfiniteTool::NAME: true,
                        }
                    }
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;
    cx.run_until_parked();

    // Test that test-1 profile (default) has echo and delay tools
    thread
        .update(cx, |thread, cx| {
            thread.set_profile(AgentProfileId("test-1".into()), cx);
            thread.send(UserMessageId::new(), ["test"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    let mut pending_completions = fake_model.pending_completions();
    assert_eq!(pending_completions.len(), 1);
    let completion = pending_completions.pop().unwrap();
    let tool_names: Vec<String> = completion
        .tools
        .iter()
        .map(|tool| tool.name.clone())
        .collect();
    // NOTE(review): the expected order implies tools are listed
    // alphabetically — confirm against the tool-collection code if it changes.
    assert_eq!(tool_names, vec![DelayTool::NAME, EchoTool::NAME]);
    fake_model.end_last_completion_stream();

    // Switch to test-2 profile, and verify that it has only the infinite tool.
    thread
        .update(cx, |thread, cx| {
            thread.set_profile(AgentProfileId("test-2".into()), cx);
            thread.send(UserMessageId::new(), ["test2"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    let mut pending_completions = fake_model.pending_completions();
    assert_eq!(pending_completions.len(), 1);
    let completion = pending_completions.pop().unwrap();
    let tool_names: Vec<String> = completion
        .tools
        .iter()
        .map(|tool| tool.name.clone())
        .collect();
    assert_eq!(tool_names, vec![InfiniteTool::NAME]);
}
1414
1415#[gpui::test]
1416async fn test_mcp_tools(cx: &mut TestAppContext) {
1417 let ThreadTest {
1418 model,
1419 thread,
1420 context_server_store,
1421 fs,
1422 ..
1423 } = setup(cx, TestModel::Fake).await;
1424 let fake_model = model.as_fake();
1425
1426 // Override profiles and wait for settings to be loaded.
1427 fs.insert_file(
1428 paths::settings_file(),
1429 json!({
1430 "agent": {
1431 "tool_permissions": { "default": "allow" },
1432 "profiles": {
1433 "test": {
1434 "name": "Test Profile",
1435 "enable_all_context_servers": true,
1436 "tools": {
1437 EchoTool::NAME: true,
1438 }
1439 },
1440 }
1441 }
1442 })
1443 .to_string()
1444 .into_bytes(),
1445 )
1446 .await;
1447 cx.run_until_parked();
1448 thread.update(cx, |thread, cx| {
1449 thread.set_profile(AgentProfileId("test".into()), cx)
1450 });
1451
1452 let mut mcp_tool_calls = setup_context_server(
1453 "test_server",
1454 vec![context_server::types::Tool {
1455 name: "echo".into(),
1456 description: None,
1457 input_schema: serde_json::to_value(EchoTool::input_schema(
1458 LanguageModelToolSchemaFormat::JsonSchema,
1459 ))
1460 .unwrap(),
1461 output_schema: None,
1462 annotations: None,
1463 }],
1464 &context_server_store,
1465 cx,
1466 );
1467
1468 let events = thread.update(cx, |thread, cx| {
1469 thread.send(UserMessageId::new(), ["Hey"], cx).unwrap()
1470 });
1471 cx.run_until_parked();
1472
1473 // Simulate the model calling the MCP tool.
1474 let completion = fake_model.pending_completions().pop().unwrap();
1475 assert_eq!(tool_names_for_completion(&completion), vec!["echo"]);
1476 fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
1477 LanguageModelToolUse {
1478 id: "tool_1".into(),
1479 name: "echo".into(),
1480 raw_input: json!({"text": "test"}).to_string(),
1481 input: json!({"text": "test"}),
1482 is_input_complete: true,
1483 thought_signature: None,
1484 },
1485 ));
1486 fake_model.end_last_completion_stream();
1487 cx.run_until_parked();
1488
1489 let (tool_call_params, tool_call_response) = mcp_tool_calls.next().await.unwrap();
1490 assert_eq!(tool_call_params.name, "echo");
1491 assert_eq!(tool_call_params.arguments, Some(json!({"text": "test"})));
1492 tool_call_response
1493 .send(context_server::types::CallToolResponse {
1494 content: vec![context_server::types::ToolResponseContent::Text {
1495 text: "test".into(),
1496 }],
1497 is_error: None,
1498 meta: None,
1499 structured_content: None,
1500 })
1501 .unwrap();
1502 cx.run_until_parked();
1503
1504 assert_eq!(tool_names_for_completion(&completion), vec!["echo"]);
1505 fake_model.send_last_completion_stream_text_chunk("Done!");
1506 fake_model.end_last_completion_stream();
1507 events.collect::<Vec<_>>().await;
1508
1509 // Send again after adding the echo tool, ensuring the name collision is resolved.
1510 let events = thread.update(cx, |thread, cx| {
1511 thread.add_tool(EchoTool);
1512 thread.send(UserMessageId::new(), ["Go"], cx).unwrap()
1513 });
1514 cx.run_until_parked();
1515 let completion = fake_model.pending_completions().pop().unwrap();
1516 assert_eq!(
1517 tool_names_for_completion(&completion),
1518 vec!["echo", "test_server_echo"]
1519 );
1520 fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
1521 LanguageModelToolUse {
1522 id: "tool_2".into(),
1523 name: "test_server_echo".into(),
1524 raw_input: json!({"text": "mcp"}).to_string(),
1525 input: json!({"text": "mcp"}),
1526 is_input_complete: true,
1527 thought_signature: None,
1528 },
1529 ));
1530 fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
1531 LanguageModelToolUse {
1532 id: "tool_3".into(),
1533 name: "echo".into(),
1534 raw_input: json!({"text": "native"}).to_string(),
1535 input: json!({"text": "native"}),
1536 is_input_complete: true,
1537 thought_signature: None,
1538 },
1539 ));
1540 fake_model.end_last_completion_stream();
1541 cx.run_until_parked();
1542
1543 let (tool_call_params, tool_call_response) = mcp_tool_calls.next().await.unwrap();
1544 assert_eq!(tool_call_params.name, "echo");
1545 assert_eq!(tool_call_params.arguments, Some(json!({"text": "mcp"})));
1546 tool_call_response
1547 .send(context_server::types::CallToolResponse {
1548 content: vec![context_server::types::ToolResponseContent::Text { text: "mcp".into() }],
1549 is_error: None,
1550 meta: None,
1551 structured_content: None,
1552 })
1553 .unwrap();
1554 cx.run_until_parked();
1555
1556 // Ensure the tool results were inserted with the correct names.
1557 let completion = fake_model.pending_completions().pop().unwrap();
1558 assert_eq!(
1559 completion.messages.last().unwrap().content,
1560 vec![
1561 MessageContent::ToolResult(LanguageModelToolResult {
1562 tool_use_id: "tool_3".into(),
1563 tool_name: "echo".into(),
1564 is_error: false,
1565 content: "native".into(),
1566 output: Some("native".into()),
1567 },),
1568 MessageContent::ToolResult(LanguageModelToolResult {
1569 tool_use_id: "tool_2".into(),
1570 tool_name: "test_server_echo".into(),
1571 is_error: false,
1572 content: "mcp".into(),
1573 output: Some("mcp".into()),
1574 },),
1575 ]
1576 );
1577 fake_model.end_last_completion_stream();
1578 events.collect::<Vec<_>>().await;
1579}
1580
// Regression test (zed-industries/zed#47404): a saved MCP tool result must
// still be emitted when the thread is replayed after the MCP server has
// disconnected (as happens on app restart before servers reconnect).
#[gpui::test]
async fn test_mcp_tool_result_displayed_when_server_disconnected(cx: &mut TestAppContext) {
    let ThreadTest {
        model,
        thread,
        context_server_store,
        fs,
        ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Setup settings to allow MCP tools
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "always_allow_tool_actions": true,
                "profiles": {
                    "test": {
                        "name": "Test Profile",
                        "enable_all_context_servers": true,
                        "tools": {}
                    },
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;
    cx.run_until_parked();
    thread.update(cx, |thread, cx| {
        thread.set_profile(AgentProfileId("test".into()), cx)
    });

    // Setup a context server with a tool
    let mut mcp_tool_calls = setup_context_server(
        "github_server",
        vec![context_server::types::Tool {
            name: "issue_read".into(),
            description: Some("Read a GitHub issue".into()),
            input_schema: json!({
                "type": "object",
                "properties": {
                    "issue_url": { "type": "string" }
                }
            }),
            output_schema: None,
            annotations: None,
        }],
        &context_server_store,
        cx,
    );

    // Send a message and have the model call the MCP tool
    let events = thread.update(cx, |thread, cx| {
        thread
            .send(UserMessageId::new(), ["Read issue #47404"], cx)
            .unwrap()
    });
    cx.run_until_parked();

    // Verify the MCP tool is available to the model
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        tool_names_for_completion(&completion),
        vec!["issue_read"],
        "MCP tool should be available"
    );

    // Simulate the model calling the MCP tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_1".into(),
            name: "issue_read".into(),
            raw_input: json!({"issue_url": "https://github.com/zed-industries/zed/issues/47404"})
                .to_string(),
            input: json!({"issue_url": "https://github.com/zed-industries/zed/issues/47404"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // The MCP server receives the tool call and responds with content
    let expected_tool_output = "Issue #47404: Tool call results are cleared upon app close";
    let (tool_call_params, tool_call_response) = mcp_tool_calls.next().await.unwrap();
    assert_eq!(tool_call_params.name, "issue_read");
    tool_call_response
        .send(context_server::types::CallToolResponse {
            content: vec![context_server::types::ToolResponseContent::Text {
                text: expected_tool_output.into(),
            }],
            is_error: None,
            meta: None,
            structured_content: None,
        })
        .unwrap();
    cx.run_until_parked();

    // After tool completes, the model continues with a new completion request
    // that includes the tool results. We need to respond to this.
    let _completion = fake_model.pending_completions().pop().unwrap();
    fake_model.send_last_completion_stream_text_chunk("I found the issue!");
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    fake_model.end_last_completion_stream();
    events.collect::<Vec<_>>().await;

    // Verify the tool result is stored in the thread by checking the markdown output.
    // The tool result is in the first assistant message (not the last one, which is
    // the model's response after the tool completed).
    thread.update(cx, |thread, _cx| {
        let markdown = thread.to_markdown();
        assert!(
            markdown.contains("**Tool Result**: issue_read"),
            "Thread should contain tool result header"
        );
        assert!(
            markdown.contains(expected_tool_output),
            "Thread should contain tool output: {}",
            expected_tool_output
        );
    });

    // Simulate app restart: disconnect the MCP server.
    // After restart, the MCP server won't be connected yet when the thread is replayed.
    context_server_store.update(cx, |store, cx| {
        let _ = store.stop_server(&ContextServerId("github_server".into()), cx);
    });
    cx.run_until_parked();

    // Replay the thread (this is what happens when loading a saved thread)
    let mut replay_events = thread.update(cx, |thread, cx| thread.replay(cx));

    // Scan the replayed stream for the original tool call and for an update
    // carrying its saved raw output.
    let mut found_tool_call = None;
    let mut found_tool_call_update_with_output = None;

    while let Some(event) = replay_events.next().await {
        let event = event.unwrap();
        match &event {
            ThreadEvent::ToolCall(tc) if tc.tool_call_id.to_string() == "tool_1" => {
                found_tool_call = Some(tc.clone());
            }
            ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(update))
                if update.tool_call_id.to_string() == "tool_1" =>
            {
                if update.fields.raw_output.is_some() {
                    found_tool_call_update_with_output = Some(update.clone());
                }
            }
            _ => {}
        }
    }

    // The tool call should be found
    assert!(
        found_tool_call.is_some(),
        "Tool call should be emitted during replay"
    );

    assert!(
        found_tool_call_update_with_output.is_some(),
        "ToolCallUpdate with raw_output should be emitted even when MCP server is disconnected."
    );

    let update = found_tool_call_update_with_output.unwrap();
    assert_eq!(
        update.fields.raw_output,
        Some(expected_tool_output.into()),
        "raw_output should contain the saved tool result"
    );

    // Also verify the status is correct (completed, not failed)
    assert_eq!(
        update.fields.status,
        Some(acp::ToolCallStatus::Completed),
        "Tool call status should reflect the original completion status"
    );
}
1762
// Exercises MCP tool-name normalization across several servers: collision
// disambiguation via server-name prefixes, snake_case conversion of server
// names with spaces, and truncation of names at/over MAX_TOOL_NAME_LENGTH.
#[gpui::test]
async fn test_mcp_tool_truncation(cx: &mut TestAppContext) {
    let ThreadTest {
        model,
        thread,
        context_server_store,
        fs,
        ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Set up a profile with all tools enabled
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "profiles": {
                    "test": {
                        "name": "Test Profile",
                        "enable_all_context_servers": true,
                        "tools": {
                            EchoTool::NAME: true,
                            DelayTool::NAME: true,
                            WordListTool::NAME: true,
                            ToolRequiringPermission::NAME: true,
                            InfiniteTool::NAME: true,
                        }
                    },
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;
    cx.run_until_parked();

    thread.update(cx, |thread, cx| {
        thread.set_profile(AgentProfileId("test".into()), cx);
        thread.add_tool(EchoTool);
        thread.add_tool(DelayTool);
        thread.add_tool(WordListTool);
        thread.add_tool(ToolRequiringPermission);
        thread.add_tool(InfiniteTool);
    });

    // Set up multiple context servers with some overlapping tool names
    let _server1_calls = setup_context_server(
        "xxx",
        vec![
            context_server::types::Tool {
                name: "echo".into(), // Conflicts with native EchoTool
                description: None,
                input_schema: serde_json::to_value(EchoTool::input_schema(
                    LanguageModelToolSchemaFormat::JsonSchema,
                ))
                .unwrap(),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "unique_tool_1".into(),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
        ],
        &context_server_store,
        cx,
    );

    let _server2_calls = setup_context_server(
        "yyy",
        vec![
            context_server::types::Tool {
                name: "echo".into(), // Also conflicts with native EchoTool
                description: None,
                input_schema: serde_json::to_value(EchoTool::input_schema(
                    LanguageModelToolSchemaFormat::JsonSchema,
                ))
                .unwrap(),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "unique_tool_2".into(),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            // Near-limit names: long enough that adding a server prefix
            // would push them over MAX_TOOL_NAME_LENGTH.
            context_server::types::Tool {
                name: "a".repeat(MAX_TOOL_NAME_LENGTH - 2),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "b".repeat(MAX_TOOL_NAME_LENGTH - 1),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
        ],
        &context_server_store,
        cx,
    );
    // Third server duplicates the long names and adds one that exceeds the
    // limit outright.
    let _server3_calls = setup_context_server(
        "zzz",
        vec![
            context_server::types::Tool {
                name: "a".repeat(MAX_TOOL_NAME_LENGTH - 2),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "b".repeat(MAX_TOOL_NAME_LENGTH - 1),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "c".repeat(MAX_TOOL_NAME_LENGTH + 1),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
        ],
        &context_server_store,
        cx,
    );

    // Server with spaces in name - tests snake_case conversion for API compatibility
    let _server4_calls = setup_context_server(
        "Azure DevOps",
        vec![context_server::types::Tool {
            name: "echo".into(), // Also conflicts - will be disambiguated as azure_dev_ops_echo
            description: None,
            input_schema: serde_json::to_value(EchoTool::input_schema(
                LanguageModelToolSchemaFormat::JsonSchema,
            ))
            .unwrap(),
            output_schema: None,
            annotations: None,
        }],
        &context_server_store,
        cx,
    );

    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Go"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    let completion = fake_model.pending_completions().pop().unwrap();
    // Expected names are sorted; long names are prefixed with a truncated
    // server initial ("y_"/"z_") or clipped to the length limit.
    assert_eq!(
        tool_names_for_completion(&completion),
        vec![
            "azure_dev_ops_echo",
            "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
            "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
            "delay",
            "echo",
            "infinite",
            "tool_requiring_permission",
            "unique_tool_1",
            "unique_tool_2",
            "word_list",
            "xxx_echo",
            "y_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "yyy_echo",
            "z_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
        ]
    );
}
1946
// E2E test (needs the "e2e" feature and live model access): cancelling a turn
// closes the event stream with StopReason::Cancelled even while a tool is
// still running, and the thread can accept a new message afterwards.
#[gpui::test]
#[cfg_attr(not(feature = "e2e"), ignore)]
async fn test_cancellation(cx: &mut TestAppContext) {
    let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(InfiniteTool);
            thread.add_tool(EchoTool);
            thread.send(
                UserMessageId::new(),
                ["Call the echo tool, then call the infinite tool, then explain their output"],
                cx,
            )
        })
        .unwrap();

    // Wait until both tools are called.
    let mut expected_tools = vec!["Echo", "Infinite Tool"];
    let mut echo_id = None;
    let mut echo_completed = false;
    while let Some(event) = events.next().await {
        match event.unwrap() {
            ThreadEvent::ToolCall(tool_call) => {
                // Tool calls must arrive in the order the prompt requested.
                assert_eq!(tool_call.title, expected_tools.remove(0));
                if tool_call.title == "Echo" {
                    echo_id = Some(tool_call.tool_call_id);
                }
            }
            // Watch for the echo tool's completion so we cancel while only
            // the infinite tool is still running.
            ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
                acp::ToolCallUpdate {
                    tool_call_id,
                    fields:
                        acp::ToolCallUpdateFields {
                            status: Some(acp::ToolCallStatus::Completed),
                            ..
                        },
                    ..
                },
            )) if Some(&tool_call_id) == echo_id.as_ref() => {
                echo_completed = true;
            }
            _ => {}
        }

        if expected_tools.is_empty() && echo_completed {
            break;
        }
    }

    // Cancel the current send and ensure that the event stream is closed, even
    // if one of the tools is still running.
    thread.update(cx, |thread, cx| thread.cancel(cx)).await;
    let events = events.collect::<Vec<_>>().await;
    let last_event = events.last();
    assert!(
        matches!(
            last_event,
            Some(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
        ),
        "unexpected event {last_event:?}"
    );

    // Ensure we can still send a new message after cancellation.
    let events = thread
        .update(cx, |thread, cx| {
            thread.send(
                UserMessageId::new(),
                ["Testing: reply with 'Hello' then stop."],
                cx,
            )
        })
        .unwrap()
        .collect::<Vec<_>>()
        .await;
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();
        assert_eq!(
            agent_message.content,
            vec![AgentMessageContent::Text("Hello".to_string())]
        );
    });
    assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
}
2032
// Cancelling a turn while the terminal tool is running must kill the terminal
// and still record the partial output it produced (plus a "stopped by user"
// note) in the tool result, rather than a bare cancellation message.
#[gpui::test]
async fn test_terminal_tool_cancellation_captures_output(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // A fake terminal whose command never exits, so cancellation is the only
    // way the tool call can end.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(UserMessageId::new(), ["run a command"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Cancel the thread while the terminal is running
    thread.update(cx, |thread, cx| thread.cancel(cx)).detach();

    // Collect remaining events, driving the executor to let cancellation complete
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify the terminal was killed
    assert!(
        handle.was_killed(),
        "expected terminal handle to be killed on cancellation"
    );

    // Verify we got a cancellation stop event
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::Cancelled],
    );

    // Verify the tool result contains the terminal output, not just "Tool canceled by user"
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();

        let tool_use = agent_message
            .content
            .iter()
            .find_map(|content| match content {
                AgentMessageContent::ToolUse(tool_use) => Some(tool_use),
                _ => None,
            })
            .expect("expected tool use in agent message");

        let tool_result = agent_message
            .tool_results
            .get(&tool_use.id)
            .expect("expected tool result");

        let result_text = match &tool_result.content {
            language_model::LanguageModelToolResultContent::Text(text) => text.to_string(),
            _ => panic!("expected text content in tool result"),
        };

        // "partial output" comes from FakeTerminalHandle's output field
        assert!(
            result_text.contains("partial output"),
            "expected tool result to contain terminal output, got: {result_text}"
        );
        // Match the actual format from process_content in terminal_tool.rs
        assert!(
            result_text.contains("The user stopped this command"),
            "expected tool result to indicate user stopped, got: {result_text}"
        );
    });

    // Verify we can send a new message after cancellation
    verify_thread_recovery(&thread, &fake_model, cx).await;
}
2129
#[gpui::test]
async fn test_cancellation_aware_tool_responds_to_cancellation(cx: &mut TestAppContext) {
    // This test verifies that tools which properly handle cancellation via
    // `event_stream.cancelled_by_user()` (like edit_file_tool) respond promptly
    // to cancellation and report that they were cancelled.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // `was_cancelled` is an observer flag the tool sets when it notices
    // cancellation, letting the test assert on the tool's internal behavior.
    let (tool, was_cancelled) = CancellationAwareTool::new();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(tool);
            thread.send(
                UserMessageId::new(),
                ["call the cancellation aware tool"],
                cx,
            )
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the cancellation-aware tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "cancellation_aware_1".into(),
            name: "cancellation_aware".into(),
            raw_input: r#"{}"#.into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    cx.run_until_parked();

    // Wait for the tool call to be reported. Polling attempts are scaled with
    // available parallelism, sleeping briefly between rounds.
    let mut tool_started = false;
    let deadline = cx.executor().num_cpus() * 100;
    for _ in 0..deadline {
        cx.run_until_parked();

        // Drain whatever events are immediately available without blocking.
        while let Some(Some(event)) = events.next().now_or_never() {
            if let Ok(ThreadEvent::ToolCall(tool_call)) = &event {
                if tool_call.title == "Cancellation Aware Tool" {
                    tool_started = true;
                    break;
                }
            }
        }

        if tool_started {
            break;
        }

        cx.background_executor
            .timer(Duration::from_millis(10))
            .await;
    }
    assert!(tool_started, "expected cancellation aware tool to start");

    // Cancel the thread and wait for it to complete
    let cancel_task = thread.update(cx, |thread, cx| thread.cancel(cx));

    // The cancel task should complete promptly because the tool handles cancellation
    let timeout = cx.background_executor.timer(Duration::from_secs(5));
    futures::select! {
        _ = cancel_task.fuse() => {}
        _ = timeout.fuse() => {
            panic!("cancel task timed out - tool did not respond to cancellation");
        }
    }

    // Verify the tool detected cancellation via its flag
    assert!(
        was_cancelled.load(std::sync::atomic::Ordering::SeqCst),
        "tool should have detected cancellation via event_stream.cancelled_by_user()"
    );

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify we got a cancellation stop event
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::Cancelled],
    );

    // Verify we can send a new message after cancellation
    verify_thread_recovery(&thread, &fake_model, cx).await;
}
2224
2225/// Helper to verify thread can recover after cancellation by sending a simple message.
2226async fn verify_thread_recovery(
2227 thread: &Entity<Thread>,
2228 fake_model: &FakeLanguageModel,
2229 cx: &mut TestAppContext,
2230) {
2231 let events = thread
2232 .update(cx, |thread, cx| {
2233 thread.send(
2234 UserMessageId::new(),
2235 ["Testing: reply with 'Hello' then stop."],
2236 cx,
2237 )
2238 })
2239 .unwrap();
2240 cx.run_until_parked();
2241 fake_model.send_last_completion_stream_text_chunk("Hello");
2242 fake_model
2243 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2244 fake_model.end_last_completion_stream();
2245
2246 let events = events.collect::<Vec<_>>().await;
2247 thread.update(cx, |thread, _cx| {
2248 let message = thread.last_received_or_pending_message().unwrap();
2249 let agent_message = message.as_agent_message().unwrap();
2250 assert_eq!(
2251 agent_message.content,
2252 vec![AgentMessageContent::Text("Hello".to_string())]
2253 );
2254 });
2255 assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
2256}
2257
2258/// Waits for a terminal tool to start by watching for a ToolCallUpdate with terminal content.
2259async fn wait_for_terminal_tool_started(
2260 events: &mut mpsc::UnboundedReceiver<Result<ThreadEvent>>,
2261 cx: &mut TestAppContext,
2262) {
2263 let deadline = cx.executor().num_cpus() * 100; // Scale with available parallelism
2264 for _ in 0..deadline {
2265 cx.run_until_parked();
2266
2267 while let Some(Some(event)) = events.next().now_or_never() {
2268 if let Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2269 update,
2270 ))) = &event
2271 {
2272 if update.fields.content.as_ref().is_some_and(|content| {
2273 content
2274 .iter()
2275 .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
2276 }) {
2277 return;
2278 }
2279 }
2280 }
2281
2282 cx.background_executor
2283 .timer(Duration::from_millis(10))
2284 .await;
2285 }
2286 panic!("terminal tool did not start within the expected time");
2287}
2288
2289/// Collects events until a Stop event is received, driving the executor to completion.
2290async fn collect_events_until_stop(
2291 events: &mut mpsc::UnboundedReceiver<Result<ThreadEvent>>,
2292 cx: &mut TestAppContext,
2293) -> Vec<Result<ThreadEvent>> {
2294 let mut collected = Vec::new();
2295 let deadline = cx.executor().num_cpus() * 200;
2296
2297 for _ in 0..deadline {
2298 cx.executor().advance_clock(Duration::from_millis(10));
2299 cx.run_until_parked();
2300
2301 while let Some(Some(event)) = events.next().now_or_never() {
2302 let is_stop = matches!(&event, Ok(ThreadEvent::Stop(_)));
2303 collected.push(event);
2304 if is_stop {
2305 return collected;
2306 }
2307 }
2308 }
2309 panic!(
2310 "did not receive Stop event within the expected time; collected {} events",
2311 collected.len()
2312 );
2313}
2314
/// Truncating the thread at its only message while the terminal tool is
/// running must kill the terminal, leave the thread empty, and still allow
/// new messages afterwards.
#[gpui::test]
async fn test_truncate_while_terminal_tool_running(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // Fake terminal whose command never exits, so the tool is still running
    // when we truncate.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    // Keep the message id so we can truncate at this exact message later.
    let message_id = UserMessageId::new();
    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(message_id.clone(), ["run a command"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Truncate the thread while the terminal is running
    thread
        .update(cx, |thread, cx| thread.truncate(message_id, cx))
        .unwrap();

    // Drive the executor to let cancellation complete
    let _ = collect_events_until_stop(&mut events, cx).await;

    // Verify the terminal was killed
    assert!(
        handle.was_killed(),
        "expected terminal handle to be killed on truncate"
    );

    // Verify the thread is empty after truncation
    thread.update(cx, |thread, _cx| {
        assert_eq!(
            thread.to_markdown(),
            "",
            "expected thread to be empty after truncating the only message"
        );
    });

    // Verify we can send a new message after truncation
    verify_thread_recovery(&thread, &fake_model, cx).await;
}
2381
#[gpui::test]
async fn test_cancel_multiple_concurrent_terminal_tools(cx: &mut TestAppContext) {
    // Tests that cancellation properly kills all running terminal tools when multiple are active.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // Environment that hands out a fresh fake terminal per tool invocation,
    // so two concurrent tool calls each get their own handle.
    let environment = Rc::new(MultiTerminalEnvironment::new());

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment.clone(),
            ));
            thread.send(UserMessageId::new(), ["run multiple commands"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling two terminal tools
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_2".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 2000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 2000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for both terminal tools to start by counting terminal content updates.
    // Polling attempts are scaled with available parallelism, sleeping briefly
    // between rounds when no events are ready.
    let mut terminals_started = 0;
    let deadline = cx.executor().num_cpus() * 100;
    for _ in 0..deadline {
        cx.run_until_parked();

        while let Some(Some(event)) = events.next().now_or_never() {
            if let Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
                update,
            ))) = &event
            {
                if update.fields.content.as_ref().is_some_and(|content| {
                    content
                        .iter()
                        .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
                }) {
                    terminals_started += 1;
                    if terminals_started >= 2 {
                        break;
                    }
                }
            }
        }
        if terminals_started >= 2 {
            break;
        }

        cx.background_executor
            .timer(Duration::from_millis(10))
            .await;
    }
    assert!(
        terminals_started >= 2,
        "expected 2 terminal tools to start, got {terminals_started}"
    );

    // Cancel the thread while both terminals are running
    thread.update(cx, |thread, cx| thread.cancel(cx)).detach();

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify both terminal handles were killed
    let handles = environment.handles();
    assert_eq!(
        handles.len(),
        2,
        "expected 2 terminal handles to be created"
    );
    assert!(
        handles[0].was_killed(),
        "expected first terminal handle to be killed on cancellation"
    );
    assert!(
        handles[1].was_killed(),
        "expected second terminal handle to be killed on cancellation"
    );

    // Verify we got a cancellation stop event
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::Cancelled],
    );
}
2490
#[gpui::test]
async fn test_terminal_tool_stopped_via_terminal_card_button(cx: &mut TestAppContext) {
    // Tests that clicking the stop button on the terminal card (as opposed to the main
    // cancel button) properly reports user stopped via the was_stopped_by_user path.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // Fake terminal whose command never exits on its own; only the simulated
    // stop-button click below will terminate it.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(UserMessageId::new(), ["run a command"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Simulate user clicking stop on the terminal card itself.
    // This sets the flag and signals exit (simulating what the real UI would do).
    handle.set_stopped_by_user(true);
    handle.killed.store(true, Ordering::SeqCst);
    handle.signal_exit();

    // Wait for the tool to complete
    cx.run_until_parked();

    // The thread continues after tool completion - simulate the model ending its turn
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    fake_model.end_last_completion_stream();

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify we got an EndTurn (not Cancelled, since we didn't cancel the thread)
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::EndTurn],
    );

    // Verify the tool result indicates user stopped
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();

        // Locate the terminal tool use recorded in the agent's message.
        let tool_use = agent_message
            .content
            .iter()
            .find_map(|content| match content {
                AgentMessageContent::ToolUse(tool_use) => Some(tool_use),
                _ => None,
            })
            .expect("expected tool use in agent message");

        let tool_result = agent_message
            .tool_results
            .get(&tool_use.id)
            .expect("expected tool result");

        let result_text = match &tool_result.content {
            language_model::LanguageModelToolResultContent::Text(text) => text.to_string(),
            _ => panic!("expected text content in tool result"),
        };

        assert!(
            result_text.contains("The user stopped this command"),
            "expected tool result to indicate user stopped, got: {result_text}"
        );
    });
}
2585
#[gpui::test]
async fn test_terminal_tool_timeout_expires(cx: &mut TestAppContext) {
    // Tests that when a timeout is configured and expires, the tool result indicates timeout.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // Fake terminal whose command never exits, guaranteeing the timeout fires
    // before the command completes.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(UserMessageId::new(), ["run a command with timeout"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool with a short timeout
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": ".", "timeout_ms": 100}"#.into(),
            input: json!({"command": "sleep 1000", "cd": ".", "timeout_ms": 100}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Advance clock past the timeout (100ms configured above)
    cx.executor().advance_clock(Duration::from_millis(200));
    cx.run_until_parked();

    // The thread continues after tool completion - simulate the model ending its turn
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    fake_model.end_last_completion_stream();

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify the terminal was killed due to timeout
    assert!(
        handle.was_killed(),
        "expected terminal handle to be killed on timeout"
    );

    // Verify we got an EndTurn (the tool completed, just with timeout)
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::EndTurn],
    );

    // Verify the tool result indicates timeout, not user stopped
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();

        // Locate the terminal tool use recorded in the agent's message.
        let tool_use = agent_message
            .content
            .iter()
            .find_map(|content| match content {
                AgentMessageContent::ToolUse(tool_use) => Some(tool_use),
                _ => None,
            })
            .expect("expected tool use in agent message");

        let tool_result = agent_message
            .tool_results
            .get(&tool_use.id)
            .expect("expected tool result");

        let result_text = match &tool_result.content {
            language_model::LanguageModelToolResultContent::Text(text) => text.to_string(),
            _ => panic!("expected text content in tool result"),
        };

        assert!(
            result_text.contains("timed out"),
            "expected tool result to indicate timeout, got: {result_text}"
        );
        assert!(
            !result_text.contains("The user stopped"),
            "tool result should not mention user stopped when it timed out, got: {result_text}"
        );
    });
}
2684
2685#[gpui::test]
2686async fn test_in_progress_send_canceled_by_next_send(cx: &mut TestAppContext) {
2687 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
2688 let fake_model = model.as_fake();
2689
2690 let events_1 = thread
2691 .update(cx, |thread, cx| {
2692 thread.send(UserMessageId::new(), ["Hello 1"], cx)
2693 })
2694 .unwrap();
2695 cx.run_until_parked();
2696 fake_model.send_last_completion_stream_text_chunk("Hey 1!");
2697 cx.run_until_parked();
2698
2699 let events_2 = thread
2700 .update(cx, |thread, cx| {
2701 thread.send(UserMessageId::new(), ["Hello 2"], cx)
2702 })
2703 .unwrap();
2704 cx.run_until_parked();
2705 fake_model.send_last_completion_stream_text_chunk("Hey 2!");
2706 fake_model
2707 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2708 fake_model.end_last_completion_stream();
2709
2710 let events_1 = events_1.collect::<Vec<_>>().await;
2711 assert_eq!(stop_events(events_1), vec![acp::StopReason::Cancelled]);
2712 let events_2 = events_2.collect::<Vec<_>>().await;
2713 assert_eq!(stop_events(events_2), vec![acp::StopReason::EndTurn]);
2714}
2715
#[gpui::test]
async fn test_retry_cancelled_promptly_on_new_send(cx: &mut TestAppContext) {
    // Regression test: when a completion fails with a retryable error (e.g. upstream 500),
    // the retry loop waits on a timer. If the user switches models and sends a new message
    // during that delay, the old turn should exit immediately instead of retrying with the
    // stale model.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let model_a = model.as_fake();

    // Start a turn with model_a.
    let events_1 = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    assert_eq!(model_a.completion_count(), 1);

    // Model returns a retryable upstream 500. The turn enters the retry delay.
    model_a.send_last_completion_stream_error(
        LanguageModelCompletionError::UpstreamProviderError {
            message: "Internal server error".to_string(),
            status: http_client::StatusCode::INTERNAL_SERVER_ERROR,
            retry_after: None,
        },
    );
    model_a.end_last_completion_stream();
    cx.run_until_parked();

    // The old completion was consumed; model_a has no pending requests yet because the
    // retry timer hasn't fired.
    assert_eq!(model_a.completion_count(), 0);

    // Switch to model_b and send a new message. This cancels the old turn.
    let model_b = Arc::new(FakeLanguageModel::with_id_and_thinking(
        "fake", "model-b", "Model B", false,
    ));
    thread.update(cx, |thread, cx| {
        thread.set_model(model_b.clone(), cx);
    });
    let events_2 = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Continue"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // model_b should have received its completion request.
    assert_eq!(model_b.as_fake().completion_count(), 1);

    // Advance the clock well past the retry delay (BASE_RETRY_DELAY = 5s).
    cx.executor().advance_clock(Duration::from_secs(10));
    cx.run_until_parked();

    // model_a must NOT have received another completion request — the cancelled turn
    // should have exited during the retry delay rather than retrying with the old model.
    assert_eq!(
        model_a.completion_count(),
        0,
        "old model should not receive a retry request after cancellation"
    );

    // Complete model_b's turn.
    model_b
        .as_fake()
        .send_last_completion_stream_text_chunk("Done!");
    model_b
        .as_fake()
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    model_b.as_fake().end_last_completion_stream();

    // The superseded turn reports Cancelled; the new turn reports EndTurn.
    let events_1 = events_1.collect::<Vec<_>>().await;
    assert_eq!(stop_events(events_1), vec![acp::StopReason::Cancelled]);

    let events_2 = events_2.collect::<Vec<_>>().await;
    assert_eq!(stop_events(events_2), vec![acp::StopReason::EndTurn]);
}
2793
2794#[gpui::test]
2795async fn test_subsequent_successful_sends_dont_cancel(cx: &mut TestAppContext) {
2796 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
2797 let fake_model = model.as_fake();
2798
2799 let events_1 = thread
2800 .update(cx, |thread, cx| {
2801 thread.send(UserMessageId::new(), ["Hello 1"], cx)
2802 })
2803 .unwrap();
2804 cx.run_until_parked();
2805 fake_model.send_last_completion_stream_text_chunk("Hey 1!");
2806 fake_model
2807 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2808 fake_model.end_last_completion_stream();
2809 let events_1 = events_1.collect::<Vec<_>>().await;
2810
2811 let events_2 = thread
2812 .update(cx, |thread, cx| {
2813 thread.send(UserMessageId::new(), ["Hello 2"], cx)
2814 })
2815 .unwrap();
2816 cx.run_until_parked();
2817 fake_model.send_last_completion_stream_text_chunk("Hey 2!");
2818 fake_model
2819 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2820 fake_model.end_last_completion_stream();
2821 let events_2 = events_2.collect::<Vec<_>>().await;
2822
2823 assert_eq!(stop_events(events_1), vec![acp::StopReason::EndTurn]);
2824 assert_eq!(stop_events(events_2), vec![acp::StopReason::EndTurn]);
2825}
2826
/// When the model refuses (StopReason::Refusal), everything after the last
/// user message is removed — here that leaves the thread completely empty.
#[gpui::test]
async fn test_refusal(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    // The user message is recorded before any model response arrives.
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello
            "}
        );
    });

    fake_model.send_last_completion_stream_text_chunk("Hey!");
    cx.run_until_parked();
    // The partial assistant response is visible while streaming.
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello

                ## Assistant

                Hey!
            "}
        );
    });

    // If the model refuses to continue, the thread should remove all the messages after the last user message.
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::Refusal));
    let events = events.collect::<Vec<_>>().await;
    assert_eq!(stop_events(events), vec![acp::StopReason::Refusal]);
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.to_markdown(), "");
    });
}
2875
/// Truncating at the first (and only) message empties the thread and resets
/// token usage; the thread must then accept a fresh turn with fresh usage.
#[gpui::test]
async fn test_truncate_first_message(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Keep the message id so we can truncate at this exact message later.
    let message_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_id.clone(), ["Hello"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    // Before any model response: only the user message, no usage yet.
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello
            "}
        );
        assert_eq!(thread.latest_token_usage(), None);
    });

    fake_model.send_last_completion_stream_text_chunk("Hey!");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 32_000,
            output_tokens: 16_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    cx.run_until_parked();
    // The response and its token usage are now reflected in the thread.
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello

                ## Assistant

                Hey!
            "}
        );
        assert_eq!(
            thread.latest_token_usage(),
            Some(acp_thread::TokenUsage {
                used_tokens: 32_000 + 16_000,
                max_tokens: 1_000_000,
                max_output_tokens: None,
                input_tokens: 32_000,
                output_tokens: 16_000,
            })
        );
    });

    // Truncating at the only message clears both content and usage.
    thread
        .update(cx, |thread, cx| thread.truncate(message_id, cx))
        .unwrap();
    cx.run_until_parked();
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.to_markdown(), "");
        assert_eq!(thread.latest_token_usage(), None);
    });

    // Ensure we can still send a new message after truncation.
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hi"], cx)
        })
        .unwrap();
    thread.update(cx, |thread, _cx| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hi
            "}
        );
    });
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Ahoy!");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 40_000,
            output_tokens: 20_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    cx.run_until_parked();
    // The new turn's content and usage replace the truncated state entirely.
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hi

                ## Assistant

                Ahoy!
            "}
        );

        assert_eq!(
            thread.latest_token_usage(),
            Some(acp_thread::TokenUsage {
                used_tokens: 40_000 + 20_000,
                max_tokens: 1_000_000,
                max_output_tokens: None,
                input_tokens: 40_000,
                output_tokens: 20_000,
            })
        );
    });
}
2997
/// Truncating at the second message rolls the thread back to exactly the
/// state it had after the first exchange — content and token usage alike.
#[gpui::test]
async fn test_truncate_second_message(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // First exchange: user message plus a completed assistant response.
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Message 1"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Message 1 response");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 32_000,
            output_tokens: 16_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Reusable snapshot check: asserted once after the first exchange and
    // again after truncation, which must restore exactly this state.
    let assert_first_message_state = |cx: &mut TestAppContext| {
        thread.clone().read_with(cx, |thread, _| {
            assert_eq!(
                thread.to_markdown(),
                indoc! {"
                    ## User

                    Message 1

                    ## Assistant

                    Message 1 response
                "}
            );

            assert_eq!(
                thread.latest_token_usage(),
                Some(acp_thread::TokenUsage {
                    used_tokens: 32_000 + 16_000,
                    max_tokens: 1_000_000,
                    max_output_tokens: None,
                    input_tokens: 32_000,
                    output_tokens: 16_000,
                })
            );
        });
    };

    assert_first_message_state(cx);

    // Second exchange; keep the id so we can truncate at this message.
    let second_message_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(second_message_id.clone(), ["Message 2"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    fake_model.send_last_completion_stream_text_chunk("Message 2 response");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 40_000,
            output_tokens: 20_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Both exchanges are present, with usage from the latest completion.
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Message 1

                ## Assistant

                Message 1 response

                ## User

                Message 2

                ## Assistant

                Message 2 response
            "}
        );

        assert_eq!(
            thread.latest_token_usage(),
            Some(acp_thread::TokenUsage {
                used_tokens: 40_000 + 20_000,
                max_tokens: 1_000_000,
                max_output_tokens: None,
                input_tokens: 40_000,
                output_tokens: 20_000,
            })
        );
    });

    // Truncate at the second message: the first exchange must remain intact.
    thread
        .update(cx, |thread, cx| thread.truncate(second_message_id, cx))
        .unwrap();
    cx.run_until_parked();

    assert_first_message_state(cx);
}
3112
/// Verifies that a thread title is produced by the dedicated summarization
/// model after the first exchange, that the streamed title is cut at the
/// first newline, and that subsequent messages do not trigger another
/// title-generation request.
#[gpui::test]
async fn test_title_generation(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Use a separate fake model for summarization so title requests can be
    // distinguished from ordinary completion requests.
    let summary_model = Arc::new(FakeLanguageModel::default());
    thread.update(cx, |thread, cx| {
        thread.set_summarization_model(Some(summary_model.clone()), cx)
    });

    let send = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    fake_model.send_last_completion_stream_text_chunk("Hey!");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    // No title yet: the summarization stream hasn't produced any text.
    thread.read_with(cx, |thread, _| assert_eq!(thread.title(), None));

    // Ensure the summary model has been invoked to generate a title.
    summary_model.send_last_completion_stream_text_chunk("Hello ");
    summary_model.send_last_completion_stream_text_chunk("world\nG");
    summary_model.send_last_completion_stream_text_chunk("oodnight Moon");
    summary_model.end_last_completion_stream();
    send.collect::<Vec<_>>().await;
    cx.run_until_parked();
    // Only the text before the newline becomes the title.
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.title(), Some("Hello world".into()))
    });

    // Send another message, ensuring no title is generated this time.
    let send = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello again"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Hey again!");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    // The summarization model received no new requests...
    assert_eq!(summary_model.pending_completions(), Vec::new());
    send.collect::<Vec<_>>().await;
    // ...and the original title is retained.
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.title(), Some("Hello world".into()))
    });
}
3162
/// Verifies that tool uses still awaiting user permission are excluded when
/// building the next completion request, while completed tool uses (and
/// their results) are included.
#[gpui::test]
async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let _events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(ToolRequiringPermission);
            thread.add_tool(EchoTool);
            thread.send(UserMessageId::new(), ["Hey!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // This tool call blocks on a permission prompt, so it remains pending.
    let permission_tool_use = LanguageModelToolUse {
        id: "tool_id_1".into(),
        name: ToolRequiringPermission::NAME.into(),
        raw_input: "{}".into(),
        input: json!({}),
        is_input_complete: true,
        thought_signature: None,
    };
    // This tool call can run to completion immediately.
    let echo_tool_use = LanguageModelToolUse {
        id: "tool_id_2".into(),
        name: EchoTool::NAME.into(),
        raw_input: json!({"text": "test"}).to_string(),
        input: json!({"text": "test"}),
        is_input_complete: true,
        thought_signature: None,
    };
    fake_model.send_last_completion_stream_text_chunk("Hi!");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        permission_tool_use,
    ));
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        echo_tool_use.clone(),
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Ensure pending tools are skipped when building a request.
    let request = thread
        .read_with(cx, |thread, cx| {
            thread.build_completion_request(CompletionIntent::EditFile, cx)
        })
        .unwrap();
    // messages[0] (presumably the system message) is not asserted here.
    assert_eq!(
        request.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Hey!".into()],
                cache: true,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![
                    MessageContent::Text("Hi!".into()),
                    MessageContent::ToolUse(echo_tool_use.clone())
                ],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::ToolResult(LanguageModelToolResult {
                    tool_use_id: echo_tool_use.id.clone(),
                    tool_name: echo_tool_use.name,
                    is_error: false,
                    content: "test".into(),
                    output: Some("test".into())
                })],
                cache: false,
                reasoning_details: None,
            },
        ],
    );
}
3242
/// End-to-end exercise of [`NativeAgentConnection`]: session creation, model
/// listing and selection, prompting, cancellation, and session teardown
/// (prompting a closed session must fail with "Session not found").
#[gpui::test]
async fn test_agent_connection(cx: &mut TestAppContext) {
    cx.update(settings::init);
    let templates = Templates::new();

    // Initialize language model system with test provider
    cx.update(|cx| {
        gpui_tokio::init(cx);

        let http_client = FakeHttpClient::with_404_response();
        let clock = Arc::new(clock::FakeSystemClock::new());
        let client = Client::new(clock, http_client, cx);
        let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
        language_model::init(user_store.clone(), client.clone(), cx);
        language_models::init(user_store, client.clone(), cx);
        LanguageModelRegistry::test(cx);
    });
    cx.executor().forbid_parking();

    // Create a project for new_thread
    let fake_fs = cx.update(|cx| fs::FakeFs::new(cx.background_executor().clone()));
    fake_fs.insert_tree(path!("/test"), json!({})).await;
    let project = Project::test(fake_fs.clone(), [Path::new("/test")], cx).await;
    let cwd = PathList::new(&[Path::new("/test")]);
    let thread_store = cx.new(|cx| ThreadStore::new(cx));

    // Create agent and connection
    let agent = cx
        .update(|cx| NativeAgent::new(thread_store, templates.clone(), None, fake_fs.clone(), cx));
    let connection = NativeAgentConnection(agent.clone());

    // Create a thread using new_thread
    let connection_rc = Rc::new(connection.clone());
    let acp_thread = cx
        .update(|cx| connection_rc.new_session(project, cwd, cx))
        .await
        .expect("new_thread should succeed");

    // Get the session_id from the AcpThread
    let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());

    // Test model_selector returns Some
    let selector_opt = connection.model_selector(&session_id);
    assert!(
        selector_opt.is_some(),
        "agent should always support ModelSelector"
    );
    let selector = selector_opt.unwrap();

    // Test list_models
    let listed_models = cx
        .update(|cx| selector.list_models(cx))
        .await
        .expect("list_models should succeed");
    let AgentModelList::Grouped(listed_models) = listed_models else {
        panic!("Unexpected model list type");
    };
    assert!(!listed_models.is_empty(), "should have at least one model");
    assert_eq!(
        listed_models[&AgentModelGroupName("Fake".into())][0]
            .id
            .0
            .as_ref(),
        "fake/fake"
    );

    // Test selected_model returns the default
    let model = cx
        .update(|cx| selector.selected_model(cx))
        .await
        .expect("selected_model should succeed");
    let model = cx
        .update(|cx| agent.read(cx).models().model_from_id(&model.id))
        .unwrap();
    let model = model.as_fake();
    assert_eq!(model.id().0, "fake", "should return default model");

    // Drive one round-trip through the fake model and check the transcript.
    let request = acp_thread.update(cx, |thread, cx| thread.send(vec!["abc".into()], cx));
    cx.run_until_parked();
    model.send_last_completion_stream_text_chunk("def");
    cx.run_until_parked();
    acp_thread.read_with(cx, |thread, cx| {
        assert_eq!(
            thread.to_markdown(cx),
            indoc! {"
                ## User

                abc

                ## Assistant

                def

            "}
        )
    });

    // Test cancel
    cx.update(|cx| connection.cancel(&session_id, cx));
    request.await.expect("prompt should fail gracefully");

    // Explicitly close the session and drop the ACP thread.
    cx.update(|cx| Rc::new(connection.clone()).close_session(&session_id, cx))
        .await
        .unwrap();
    drop(acp_thread);
    // Prompting a closed session must surface a "Session not found" error.
    let result = cx
        .update(|cx| {
            connection.prompt(
                Some(acp_thread::UserMessageId::new()),
                acp::PromptRequest::new(session_id.clone(), vec!["ghi".into()]),
                cx,
            )
        })
        .await;
    assert_eq!(
        result.as_ref().unwrap_err().to_string(),
        "Session not found",
        "unexpected result: {:?}",
        result
    );
}
3365
/// Verifies the lifecycle of tool-call events emitted by the thread: an
/// initial ToolCall for partial input, a field update once input completes,
/// an InProgress status update, and a Completed update carrying the output.
#[gpui::test]
async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    thread.update(cx, |thread, _cx| thread.add_tool(EchoTool));
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Echo something"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Simulate streaming partial input.
    let input = json!({});
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "1".into(),
            name: EchoTool::NAME.into(),
            raw_input: input.to_string(),
            input,
            is_input_complete: false,
            thought_signature: None,
        },
    ));

    // Input streaming completed
    let input = json!({ "text": "Hello!" });
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "1".into(),
            name: "echo".into(),
            raw_input: input.to_string(),
            input,
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // The first event is the tool call itself, with the partial raw input.
    let tool_call = expect_tool_call(&mut events).await;
    assert_eq!(
        tool_call,
        acp::ToolCall::new("1", "Echo")
            .raw_input(json!({}))
            .meta(acp::Meta::from_iter([("tool_name".into(), "echo".into())]))
    );
    // Then an update carrying the completed input.
    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "1",
            acp::ToolCallUpdateFields::new()
                .title("Echo")
                .kind(acp::ToolKind::Other)
                .raw_input(json!({ "text": "Hello!"}))
        )
    );
    // Then a status transition to InProgress...
    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "1",
            acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress)
        )
    );
    // ...and finally Completed with the echoed output.
    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "1",
            acp::ToolCallUpdateFields::new()
                .status(acp::ToolCallStatus::Completed)
                .raw_output("Hello!")
        )
    );
}
3444
/// Verifies that running the `update_plan` tool emits an [`acp::Plan`] event
/// mirroring the tool's input, in addition to the usual tool-call status
/// updates.
#[gpui::test]
async fn test_update_plan_tool_updates_thread_events(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    thread.update(cx, |thread, _cx| thread.add_tool(UpdatePlanTool));
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Make a plan"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Three plan entries, one in each status.
    let input = json!({
        "plan": [
            {
                "step": "Inspect the code",
                "status": "completed",
            },
            {
                "step": "Implement the tool",
                "status": "in_progress"
            },
            {
                "step": "Run tests",
                "status": "pending",
            }
        ]
    });
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "plan_1".into(),
            name: UpdatePlanTool::NAME.into(),
            raw_input: input.to_string(),
            input,
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    let tool_call = expect_tool_call(&mut events).await;
    assert_eq!(
        tool_call,
        acp::ToolCall::new("plan_1", "Update plan")
            .kind(acp::ToolKind::Think)
            .raw_input(json!({
                "plan": [
                    {
                        "step": "Inspect the code",
                        "status": "completed",
                    },
                    {
                        "step": "Implement the tool",
                        "status": "in_progress"
                    },
                    {
                        "step": "Run tests",
                        "status": "pending",
                    }
                ]
            }))
            .meta(acp::Meta::from_iter([(
                "tool_name".into(),
                "update_plan".into()
            )]))
    );

    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "plan_1",
            acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress)
        )
    );

    // The plan event mirrors the tool input, entry for entry.
    let plan = expect_plan(&mut events).await;
    assert_eq!(
        plan,
        acp::Plan::new(vec![
            acp::PlanEntry::new(
                "Inspect the code",
                acp::PlanEntryPriority::Medium,
                acp::PlanEntryStatus::Completed,
            ),
            acp::PlanEntry::new(
                "Implement the tool",
                acp::PlanEntryPriority::Medium,
                acp::PlanEntryStatus::InProgress,
            ),
            acp::PlanEntry::new(
                "Run tests",
                acp::PlanEntryPriority::Medium,
                acp::PlanEntryStatus::Pending,
            ),
        ])
    );

    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "plan_1",
            acp::ToolCallUpdateFields::new()
                .status(acp::ToolCallStatus::Completed)
                .raw_output("Plan updated")
        )
    );
}
3556
/// Verifies that a completion that succeeds on the first attempt emits no
/// [`ThreadEvent::Retry`] events.
#[gpui::test]
async fn test_send_no_retry_on_success(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    fake_model.send_last_completion_stream_text_chunk("Hey!");
    fake_model.end_last_completion_stream();

    // Drain events until the turn stops, collecting any retries.
    let mut retry_events = Vec::new();
    while let Some(Ok(event)) = events.next().await {
        match event {
            ThreadEvent::Retry(retry_status) => {
                retry_events.push(retry_status);
            }
            ThreadEvent::Stop(..) => break,
            _ => {}
        }
    }

    assert_eq!(retry_events.len(), 0);
    thread.read_with(cx, |thread, _cx| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello!

                ## Assistant

                Hey!
            "}
        )
    });
}
3599
/// Verifies that a retryable stream error (server overloaded) triggers
/// exactly one retry after the provider-suggested delay, and that the
/// resumed response is recorded as a separate assistant message.
#[gpui::test]
async fn test_send_retry_on_error(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Stream some text, then fail with a retryable error.
    fake_model.send_last_completion_stream_text_chunk("Hey,");
    fake_model.send_last_completion_stream_error(LanguageModelCompletionError::ServerOverloaded {
        provider: LanguageModelProviderName::new("Anthropic"),
        retry_after: Some(Duration::from_secs(3)),
    });
    fake_model.end_last_completion_stream();

    // Advance past the suggested retry_after so the retry fires.
    cx.executor().advance_clock(Duration::from_secs(3));
    cx.run_until_parked();

    // Complete the retried request successfully.
    fake_model.send_last_completion_stream_text_chunk("there!");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    let mut retry_events = Vec::new();
    while let Some(Ok(event)) = events.next().await {
        match event {
            ThreadEvent::Retry(retry_status) => {
                retry_events.push(retry_status);
            }
            ThreadEvent::Stop(..) => break,
            _ => {}
        }
    }

    assert_eq!(retry_events.len(), 1);
    assert!(matches!(
        retry_events[0],
        acp_thread::RetryStatus { attempt: 1, .. }
    ));
    thread.read_with(cx, |thread, _cx| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello!

                ## Assistant

                Hey,

                [resume]

                ## Assistant

                there!
            "}
        )
    });
}
3663
/// Verifies that when a stream error interrupts a turn, an in-flight tool
/// call is still driven to completion, and the retried request includes both
/// the tool use and its result.
#[gpui::test]
async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(EchoTool);
            thread.send(UserMessageId::new(), ["Call the echo tool!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    let tool_use_1 = LanguageModelToolUse {
        id: "tool_1".into(),
        name: EchoTool::NAME.into(),
        raw_input: json!({"text": "test"}).to_string(),
        input: json!({"text": "test"}),
        is_input_complete: true,
        thought_signature: None,
    };
    // Emit the tool use, then fail the stream with a retryable error.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        tool_use_1.clone(),
    ));
    fake_model.send_last_completion_stream_error(LanguageModelCompletionError::ServerOverloaded {
        provider: LanguageModelProviderName::new("Anthropic"),
        retry_after: Some(Duration::from_secs(3)),
    });
    fake_model.end_last_completion_stream();

    // After the retry delay, the retried request must carry the tool use and
    // its echoed result. messages[0] (presumably the system message) is not
    // asserted here.
    cx.executor().advance_clock(Duration::from_secs(3));
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Call the echo tool!".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![language_model::MessageContent::ToolUse(tool_use_1.clone())],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![language_model::MessageContent::ToolResult(
                    LanguageModelToolResult {
                        tool_use_id: tool_use_1.id.clone(),
                        tool_name: tool_use_1.name.clone(),
                        is_error: false,
                        content: "test".into(),
                        output: Some("test".into())
                    }
                )],
                cache: true,
                reasoning_details: None,
            },
        ]
    );

    fake_model.send_last_completion_stream_text_chunk("Done");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    events.collect::<Vec<_>>().await;
    thread.read_with(cx, |thread, _cx| {
        assert_eq!(
            thread.last_received_or_pending_message(),
            Some(Message::Agent(AgentMessage {
                content: vec![AgentMessageContent::Text("Done".into())],
                tool_results: IndexMap::default(),
                reasoning_details: None,
            }))
        );
    })
}
3743
/// Verifies that after `MAX_RETRY_ATTEMPTS` failed retries, the turn stops
/// retrying and surfaces the underlying completion error to the caller.
#[gpui::test]
async fn test_send_max_retries_exceeded(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Fail the initial attempt plus every retry (hence the +1).
    for _ in 0..crate::thread::MAX_RETRY_ATTEMPTS + 1 {
        fake_model.send_last_completion_stream_error(
            LanguageModelCompletionError::ServerOverloaded {
                provider: LanguageModelProviderName::new("Anthropic"),
                retry_after: Some(Duration::from_secs(3)),
            },
        );
        fake_model.end_last_completion_stream();
        cx.executor().advance_clock(Duration::from_secs(3));
        cx.run_until_parked();
    }

    let mut errors = Vec::new();
    let mut retry_events = Vec::new();
    while let Some(event) = events.next().await {
        match event {
            Ok(ThreadEvent::Retry(retry_status)) => {
                retry_events.push(retry_status);
            }
            Ok(ThreadEvent::Stop(..)) => break,
            Err(error) => errors.push(error),
            _ => {}
        }
    }

    // One retry event per attempt, numbered 1..=MAX_RETRY_ATTEMPTS.
    assert_eq!(
        retry_events.len(),
        crate::thread::MAX_RETRY_ATTEMPTS as usize
    );
    for i in 0..crate::thread::MAX_RETRY_ATTEMPTS as usize {
        assert_eq!(retry_events[i].attempt, i + 1);
    }
    // The final failure is reported once, preserving the original error type.
    assert_eq!(errors.len(), 1);
    let error = errors[0]
        .downcast_ref::<LanguageModelCompletionError>()
        .unwrap();
    assert!(matches!(
        error,
        LanguageModelCompletionError::ServerOverloaded { .. }
    ));
}
3797
/// Regression test: a streaming tool whose input never completes must still
/// terminate (with an error result) when the LLM stream itself errors out,
/// instead of deadlocking the turn. See inline comments for the pre-fix
/// deadlock scenario.
#[gpui::test]
async fn test_streaming_tool_completes_when_llm_stream_ends_without_final_input(
    cx: &mut TestAppContext,
) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    thread.update(cx, |thread, _cx| {
        thread.add_tool(StreamingEchoTool::new());
    });

    let _events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Use the streaming_echo tool"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Send a partial tool use (is_input_complete = false), simulating the LLM
    // streaming input for a tool.
    let tool_use = LanguageModelToolUse {
        id: "tool_1".into(),
        name: "streaming_echo".into(),
        raw_input: r#"{"text": "partial"}"#.into(),
        input: json!({"text": "partial"}),
        is_input_complete: false,
        thought_signature: None,
    };
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone()));
    cx.run_until_parked();

    // Send a stream error WITHOUT ever sending is_input_complete = true.
    // Before the fix, this would deadlock: the tool waits for more partials
    // (or cancellation), run_turn_internal waits for the tool, and the sender
    // keeping the channel open lives inside RunningTurn.
    fake_model.send_last_completion_stream_error(
        LanguageModelCompletionError::UpstreamProviderError {
            message: "Internal server error".to_string(),
            status: http_client::StatusCode::INTERNAL_SERVER_ERROR,
            retry_after: None,
        },
    );
    fake_model.end_last_completion_stream();

    // Advance past the retry delay so run_turn_internal retries.
    cx.executor().advance_clock(Duration::from_secs(5));
    cx.run_until_parked();

    // The retry request should contain the streaming tool's error result,
    // proving the tool terminated and its result was forwarded.
    let completion = fake_model
        .pending_completions()
        .pop()
        .expect("No running turn");
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Use the streaming_echo tool".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![language_model::MessageContent::ToolUse(tool_use.clone())],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![language_model::MessageContent::ToolResult(
                    LanguageModelToolResult {
                        tool_use_id: tool_use.id.clone(),
                        tool_name: tool_use.name,
                        is_error: true,
                        content: "Failed to receive tool input: tool input was not fully received"
                            .into(),
                        output: Some(
                            "Failed to receive tool input: tool input was not fully received"
                                .into()
                        ),
                    }
                )],
                cache: true,
                reasoning_details: None,
            },
        ]
    );

    // Finish the retry round so the turn completes cleanly.
    fake_model.send_last_completion_stream_text_chunk("Done");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    thread.read_with(cx, |thread, _cx| {
        assert!(
            thread.is_turn_complete(),
            "Thread should not be stuck; the turn should have completed",
        );
    });
}
3904
3905/// Filters out the stop events for asserting against in tests
3906fn stop_events(result_events: Vec<Result<ThreadEvent>>) -> Vec<acp::StopReason> {
3907 result_events
3908 .into_iter()
3909 .filter_map(|event| match event.unwrap() {
3910 ThreadEvent::Stop(stop_reason) => Some(stop_reason),
3911 _ => None,
3912 })
3913 .collect()
3914}
3915
/// Bundle of handles produced by [`setup`] that tests destructure to drive
/// a thread end-to-end.
struct ThreadTest {
    /// The language model backing the thread (fake or real, per [`TestModel`]).
    model: Arc<dyn LanguageModel>,
    /// The thread under test.
    thread: Entity<Thread>,
    /// Project context passed to the thread at construction.
    project_context: Entity<ProjectContext>,
    /// Store used by tests that register fake MCP context servers.
    context_server_store: Entity<ContextServerStore>,
    /// The fake filesystem holding the test settings file.
    fs: Arc<FakeFs>,
}
3923
/// Which language model a test fixture should be backed by.
enum TestModel {
    /// A real Claude Sonnet 4 model resolved from the registry (requires
    /// authentication in [`setup`]).
    Sonnet4,
    /// An in-process [`FakeLanguageModel`] driven manually by the test.
    Fake,
}
3928
3929impl TestModel {
3930 fn id(&self) -> LanguageModelId {
3931 match self {
3932 TestModel::Sonnet4 => LanguageModelId("claude-sonnet-4-latest".into()),
3933 TestModel::Fake => unreachable!(),
3934 }
3935 }
3936}
3937
/// Builds a [`ThreadTest`] fixture: a fake filesystem holding a settings
/// file whose agent profile enables every test tool, a test project, the
/// requested language model (fake or real), and a new [`Thread`] wired to
/// all of them.
async fn setup(cx: &mut TestAppContext, model: TestModel) -> ThreadTest {
    // The Sonnet4 path below performs real authentication, which may block.
    cx.executor().allow_parking();

    let fs = FakeFs::new(cx.background_executor.clone());
    fs.create_dir(paths::settings_file().parent().unwrap())
        .await
        .unwrap();
    // Enable every test tool in the default profile so individual tests only
    // need to call `add_tool` for the tools they exercise.
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "default_profile": "test-profile",
                "profiles": {
                    "test-profile": {
                        "name": "Test Profile",
                        "tools": {
                            EchoTool::NAME: true,
                            DelayTool::NAME: true,
                            WordListTool::NAME: true,
                            ToolRequiringPermission::NAME: true,
                            InfiniteTool::NAME: true,
                            CancellationAwareTool::NAME: true,
                            StreamingEchoTool::NAME: true,
                            StreamingFailingEchoTool::NAME: true,
                            TerminalTool::NAME: true,
                            UpdatePlanTool::NAME: true,
                        }
                    }
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;

    cx.update(|cx| {
        settings::init(cx);

        // Real models need the full client/provider stack; the fake model
        // needs no global initialization.
        match model {
            TestModel::Fake => {}
            TestModel::Sonnet4 => {
                gpui_tokio::init(cx);
                let http_client = ReqwestClient::user_agent("agent tests").unwrap();
                cx.set_http_client(Arc::new(http_client));
                let client = Client::production(cx);
                let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
                language_model::init(user_store.clone(), client.clone(), cx);
                language_models::init(user_store, client.clone(), cx);
            }
        };

        // Mirror edits to the settings file into the global SettingsStore.
        watch_settings(fs.clone(), cx);
    });

    let templates = Templates::new();

    fs.insert_tree(path!("/test"), json!({})).await;
    let project = Project::test(fs.clone(), [path!("/test").as_ref()], cx).await;

    // Resolve the model: fake models are constructed directly; real models
    // are looked up in the registry and authenticated first.
    let model = cx
        .update(|cx| {
            if let TestModel::Fake = model {
                Task::ready(Arc::new(FakeLanguageModel::default()) as Arc<_>)
            } else {
                let model_id = model.id();
                let models = LanguageModelRegistry::read_global(cx);
                let model = models
                    .available_models(cx)
                    .find(|model| model.id() == model_id)
                    .unwrap();

                let provider = models.provider(&model.provider_id()).unwrap();
                let authenticated = provider.authenticate(cx);

                cx.spawn(async move |_cx| {
                    authenticated.await.unwrap();
                    model
                })
            }
        })
        .await;

    let project_context = cx.new(|_cx| ProjectContext::default());
    let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
    let context_server_registry =
        cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
    let thread = cx.new(|cx| {
        Thread::new(
            project,
            project_context.clone(),
            context_server_registry,
            templates,
            Some(model.clone()),
            cx,
        )
    });
    ThreadTest {
        model,
        thread,
        project_context,
        context_server_store,
        fs,
    }
}
4043
/// Test-binary constructor: installs `env_logger` so log output is visible,
/// but only when the caller opted in by setting `RUST_LOG`.
#[cfg(test)]
#[ctor::ctor]
fn init_logger() {
    let logging_requested = std::env::var("RUST_LOG").is_ok();
    if logging_requested {
        env_logger::init();
    }
}
4051
4052fn watch_settings(fs: Arc<dyn Fs>, cx: &mut App) {
4053 let fs = fs.clone();
4054 cx.spawn({
4055 async move |cx| {
4056 let (mut new_settings_content_rx, watcher_task) = settings::watch_config_file(
4057 cx.background_executor(),
4058 fs,
4059 paths::settings_file().clone(),
4060 );
4061 let _watcher_task = watcher_task;
4062
4063 while let Some(new_settings_content) = new_settings_content_rx.next().await {
4064 cx.update(|cx| {
4065 SettingsStore::update_global(cx, |settings, cx| {
4066 settings.set_user_settings(&new_settings_content, cx)
4067 })
4068 })
4069 .ok();
4070 }
4071 }
4072 })
4073 .detach();
4074}
4075
4076fn tool_names_for_completion(completion: &LanguageModelRequest) -> Vec<String> {
4077 completion
4078 .tools
4079 .iter()
4080 .map(|tool| tool.name.clone())
4081 .collect()
4082}
4083
/// Registers a fake stdio MCP context server named `name` that advertises
/// `tools`, starts it in `context_server_store`, and returns a channel of
/// `(CallToolParams, response sender)` pairs so the test can service each
/// incoming tool call manually.
fn setup_context_server(
    name: &'static str,
    tools: Vec<context_server::types::Tool>,
    context_server_store: &Entity<ContextServerStore>,
    cx: &mut TestAppContext,
) -> mpsc::UnboundedReceiver<(
    context_server::types::CallToolParams,
    oneshot::Sender<context_server::types::CallToolResponse>,
)> {
    // Enable the server in project settings so the store will start it.
    cx.update(|cx| {
        let mut settings = ProjectSettings::get_global(cx).clone();
        settings.context_servers.insert(
            name.into(),
            project::project_settings::ContextServerSettings::Stdio {
                enabled: true,
                remote: false,
                command: ContextServerCommand {
                    // Never actually executed; the fake transport intercepts I/O.
                    path: "somebinary".into(),
                    args: Vec::new(),
                    env: None,
                    timeout: None,
                },
            },
        );
        ProjectSettings::override_global(settings, cx);
    });

    // Fake transport: answers the MCP handshake and tool listing itself, and
    // forwards each CallTool request to the test through the channel.
    let (mcp_tool_calls_tx, mcp_tool_calls_rx) = mpsc::unbounded();
    let fake_transport = context_server::test::create_fake_transport(name, cx.executor())
        .on_request::<context_server::types::requests::Initialize, _>(move |_params| async move {
            context_server::types::InitializeResponse {
                protocol_version: context_server::types::ProtocolVersion(
                    context_server::types::LATEST_PROTOCOL_VERSION.to_string(),
                ),
                server_info: context_server::types::Implementation {
                    name: name.into(),
                    version: "1.0.0".to_string(),
                },
                capabilities: context_server::types::ServerCapabilities {
                    tools: Some(context_server::types::ToolsCapabilities {
                        list_changed: Some(true),
                    }),
                    ..Default::default()
                },
                meta: None,
            }
        })
        .on_request::<context_server::types::requests::ListTools, _>(move |_params| {
            let tools = tools.clone();
            async move {
                context_server::types::ListToolsResponse {
                    tools,
                    next_cursor: None,
                    meta: None,
                }
            }
        })
        .on_request::<context_server::types::requests::CallTool, _>(move |params| {
            let mcp_tool_calls_tx = mcp_tool_calls_tx.clone();
            async move {
                // Hand the call to the test and block until it responds.
                let (response_tx, response_rx) = oneshot::channel();
                mcp_tool_calls_tx
                    .unbounded_send((params, response_tx))
                    .unwrap();
                response_rx.await.unwrap()
            }
        });
    context_server_store.update(cx, |store, cx| {
        store.start_server(
            Arc::new(ContextServer::new(
                ContextServerId(name.into()),
                Arc::new(fake_transport),
            )),
            cx,
        );
    });
    // Let the server finish its startup handshake before returning.
    cx.run_until_parked();
    mcp_tool_calls_rx
}
4163
/// Verifies `Thread::tokens_before_message`: each user message reports the
/// cumulative input-token count from the request that *preceded* it. The first
/// message has no preceding request (`None`), and every later message reflects
/// the `input_tokens` value from the previous request's `UsageUpdate` event.
#[gpui::test]
async fn test_tokens_before_message(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // First message
    let message_1_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_1_id.clone(), ["First message"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Before any response, tokens_before_message should return None for first message
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.tokens_before_message(&message_1_id),
            None,
            "First message should have no tokens before it"
        );
    });

    // Complete first message with usage (100 input tokens reported)
    fake_model.send_last_completion_stream_text_chunk("Response 1");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 100,
            output_tokens: 50,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // First message still has no tokens before it
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.tokens_before_message(&message_1_id),
            None,
            "First message should still have no tokens before it after response"
        );
    });

    // Second message
    let message_2_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_2_id.clone(), ["Second message"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Second message should have first message's input tokens before it
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.tokens_before_message(&message_2_id),
            Some(100),
            "Second message should have 100 tokens before it (from first request)"
        );
    });

    // Complete second message
    fake_model.send_last_completion_stream_text_chunk("Response 2");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 250, // Total for this request (includes previous context)
            output_tokens: 75,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Third message
    let message_3_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_3_id.clone(), ["Third message"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Third message should have second message's input tokens (250) before it;
    // earlier messages keep their previously recorded counts.
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.tokens_before_message(&message_3_id),
            Some(250),
            "Third message should have 250 tokens before it (from second request)"
        );
        // Second message should still have 100
        assert_eq!(
            thread.tokens_before_message(&message_2_id),
            Some(100),
            "Second message should still have 100 tokens before it"
        );
        // First message still has none
        assert_eq!(
            thread.tokens_before_message(&message_1_id),
            None,
            "First message should still have no tokens before it"
        );
    });
}
4270
/// Verifies that `Thread::truncate` removes a message (and everything after
/// it) from the token bookkeeping: `tokens_before_message` returns `None` for
/// the truncated message's id, while surviving messages are unaffected.
#[gpui::test]
async fn test_tokens_before_message_after_truncate(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Set up three messages with responses
    let message_1_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_1_id.clone(), ["Message 1"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Response 1");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 100,
            output_tokens: 50,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    let message_2_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_2_id.clone(), ["Message 2"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Response 2");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 250,
            output_tokens: 75,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Verify initial state: message 2 sees the 100 input tokens of request 1
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.tokens_before_message(&message_2_id), Some(100));
    });

    // Truncate at message 2 (removes message 2 and everything after)
    thread
        .update(cx, |thread, cx| thread.truncate(message_2_id.clone(), cx))
        .unwrap();
    cx.run_until_parked();

    // After truncation, message_2_id no longer exists, so lookup should return None
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.tokens_before_message(&message_2_id),
            None,
            "After truncation, message 2 no longer exists"
        );
        // Message 1 still exists but has no tokens before it
        assert_eq!(
            thread.tokens_before_message(&message_1_id),
            None,
            "First message still has no tokens before it"
        );
    });
}
4341
/// Exercises the terminal tool's permission-rule evaluation:
/// 1. an `always_deny` pattern blocks a matching command outright;
/// 2. an `always_allow` pattern skips confirmation and overrides a tool-level
///    `Deny` default;
/// 3. an `always_confirm` pattern still requires authorization even when the
///    global default is `Allow`;
/// 4. a tool-specific `Deny` default wins over the global `Allow` default.
///
/// Each scenario overrides `AgentSettings` globally before running the tool,
/// so the scenarios must stay sequential within this single test.
#[gpui::test]
async fn test_terminal_tool_permission_rules(cx: &mut TestAppContext) {
    init_test(cx);

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree("/root", json!({})).await;
    let project = Project::test(fs, ["/root".as_ref()], cx).await;

    // Test 1: Deny rule blocks command
    {
        let environment = Rc::new(cx.update(|cx| {
            FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
        }));

        cx.update(|cx| {
            let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
            settings.tool_permissions.tools.insert(
                TerminalTool::NAME.into(),
                agent_settings::ToolRules {
                    default: Some(settings::ToolPermissionMode::Confirm),
                    always_allow: vec![],
                    always_deny: vec![
                        agent_settings::CompiledRegex::new(r"rm\s+-rf", false).unwrap(),
                    ],
                    always_confirm: vec![],
                    invalid_patterns: vec![],
                },
            );
            agent_settings::AgentSettings::override_global(settings, cx);
        });

        #[allow(clippy::arc_with_non_send_sync)]
        let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
        let (event_stream, _rx) = crate::ToolCallEventStream::test();

        let task = cx.update(|cx| {
            tool.run(
                ToolInput::resolved(crate::TerminalToolInput {
                    command: "rm -rf /".to_string(),
                    cd: ".".to_string(),
                    timeout_ms: None,
                }),
                event_stream,
                cx,
            )
        });

        let result = task.await;
        assert!(
            result.is_err(),
            "expected command to be blocked by deny rule"
        );
        let err_msg = result.unwrap_err().to_lowercase();
        assert!(
            err_msg.contains("blocked"),
            "error should mention the command was blocked"
        );
    }

    // Test 2: Allow rule skips confirmation (and overrides default: Deny)
    {
        let environment = Rc::new(cx.update(|cx| {
            FakeThreadEnvironment::default()
                .with_terminal(FakeTerminalHandle::new_with_immediate_exit(cx, 0))
        }));

        cx.update(|cx| {
            let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
            settings.tool_permissions.tools.insert(
                TerminalTool::NAME.into(),
                agent_settings::ToolRules {
                    default: Some(settings::ToolPermissionMode::Deny),
                    always_allow: vec![
                        agent_settings::CompiledRegex::new(r"^echo\s", false).unwrap(),
                    ],
                    always_deny: vec![],
                    always_confirm: vec![],
                    invalid_patterns: vec![],
                },
            );
            agent_settings::AgentSettings::override_global(settings, cx);
        });

        #[allow(clippy::arc_with_non_send_sync)]
        let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
        let (event_stream, mut rx) = crate::ToolCallEventStream::test();

        let task = cx.update(|cx| {
            tool.run(
                ToolInput::resolved(crate::TerminalToolInput {
                    command: "echo hello".to_string(),
                    cd: ".".to_string(),
                    timeout_ms: None,
                }),
                event_stream,
                cx,
            )
        });

        // A terminal content block arriving (rather than an authorization
        // request) proves the command ran without confirmation.
        let update = rx.expect_update_fields().await;
        assert!(
            update.content.iter().any(|blocks| {
                blocks
                    .iter()
                    .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
            }),
            "expected terminal content (allow rule should skip confirmation and override default deny)"
        );

        let result = task.await;
        assert!(
            result.is_ok(),
            "expected command to succeed without confirmation"
        );
    }

    // Test 3: global default: allow does NOT override always_confirm patterns
    {
        let environment = Rc::new(cx.update(|cx| {
            FakeThreadEnvironment::default()
                .with_terminal(FakeTerminalHandle::new_with_immediate_exit(cx, 0))
        }));

        cx.update(|cx| {
            let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
            settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
            settings.tool_permissions.tools.insert(
                TerminalTool::NAME.into(),
                agent_settings::ToolRules {
                    default: Some(settings::ToolPermissionMode::Allow),
                    always_allow: vec![],
                    always_deny: vec![],
                    always_confirm: vec![
                        agent_settings::CompiledRegex::new(r"sudo", false).unwrap(),
                    ],
                    invalid_patterns: vec![],
                },
            );
            agent_settings::AgentSettings::override_global(settings, cx);
        });

        #[allow(clippy::arc_with_non_send_sync)]
        let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
        let (event_stream, mut rx) = crate::ToolCallEventStream::test();

        let _task = cx.update(|cx| {
            tool.run(
                ToolInput::resolved(crate::TerminalToolInput {
                    command: "sudo rm file".to_string(),
                    cd: ".".to_string(),
                    timeout_ms: None,
                }),
                event_stream,
                cx,
            )
        });

        // With global default: allow, confirm patterns are still respected
        // The expect_authorization() call will panic if no authorization is requested,
        // which validates that the confirm pattern still triggers confirmation
        let _auth = rx.expect_authorization().await;

        drop(_task);
    }

    // Test 4: tool-specific default: deny is respected even with global default: allow
    {
        let environment = Rc::new(cx.update(|cx| {
            FakeThreadEnvironment::default()
                .with_terminal(FakeTerminalHandle::new_with_immediate_exit(cx, 0))
        }));

        cx.update(|cx| {
            let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
            settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
            settings.tool_permissions.tools.insert(
                TerminalTool::NAME.into(),
                agent_settings::ToolRules {
                    default: Some(settings::ToolPermissionMode::Deny),
                    always_allow: vec![],
                    always_deny: vec![],
                    always_confirm: vec![],
                    invalid_patterns: vec![],
                },
            );
            agent_settings::AgentSettings::override_global(settings, cx);
        });

        #[allow(clippy::arc_with_non_send_sync)]
        let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
        let (event_stream, _rx) = crate::ToolCallEventStream::test();

        let task = cx.update(|cx| {
            tool.run(
                ToolInput::resolved(crate::TerminalToolInput {
                    command: "echo hello".to_string(),
                    cd: ".".to_string(),
                    timeout_ms: None,
                }),
                event_stream,
                cx,
            )
        });

        // tool-specific default: deny is respected even with global default: allow
        let result = task.await;
        assert!(
            result.is_err(),
            "expected command to be blocked by tool-specific deny default"
        );
        let err_msg = result.unwrap_err().to_lowercase();
        assert!(
            err_msg.contains("disabled"),
            "error should mention the tool is disabled, got: {err_msg}"
        );
    }
}
4559
4560#[gpui::test]
4561async fn test_subagent_tool_call_end_to_end(cx: &mut TestAppContext) {
4562 init_test(cx);
4563 cx.update(|cx| {
4564 LanguageModelRegistry::test(cx);
4565 });
4566 cx.update(|cx| {
4567 cx.update_flags(true, vec!["subagents".to_string()]);
4568 });
4569
4570 let fs = FakeFs::new(cx.executor());
4571 fs.insert_tree(
4572 "/",
4573 json!({
4574 "a": {
4575 "b.md": "Lorem"
4576 }
4577 }),
4578 )
4579 .await;
4580 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
4581 let thread_store = cx.new(|cx| ThreadStore::new(cx));
4582 let agent = cx.update(|cx| {
4583 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
4584 });
4585 let connection = Rc::new(NativeAgentConnection(agent.clone()));
4586
4587 let acp_thread = cx
4588 .update(|cx| {
4589 connection
4590 .clone()
4591 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
4592 })
4593 .await
4594 .unwrap();
4595 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
4596 let thread = agent.read_with(cx, |agent, _| {
4597 agent.sessions.get(&session_id).unwrap().thread.clone()
4598 });
4599 let model = Arc::new(FakeLanguageModel::default());
4600
4601 // Ensure empty threads are not saved, even if they get mutated.
4602 thread.update(cx, |thread, cx| {
4603 thread.set_model(model.clone(), cx);
4604 });
4605 cx.run_until_parked();
4606
4607 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
4608 cx.run_until_parked();
4609 model.send_last_completion_stream_text_chunk("spawning subagent");
4610 let subagent_tool_input = SpawnAgentToolInput {
4611 label: "label".to_string(),
4612 message: "subagent task prompt".to_string(),
4613 session_id: None,
4614 };
4615 let subagent_tool_use = LanguageModelToolUse {
4616 id: "subagent_1".into(),
4617 name: SpawnAgentTool::NAME.into(),
4618 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
4619 input: serde_json::to_value(&subagent_tool_input).unwrap(),
4620 is_input_complete: true,
4621 thought_signature: None,
4622 };
4623 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
4624 subagent_tool_use,
4625 ));
4626 model.end_last_completion_stream();
4627
4628 cx.run_until_parked();
4629
4630 let subagent_session_id = thread.read_with(cx, |thread, cx| {
4631 thread
4632 .running_subagent_ids(cx)
4633 .get(0)
4634 .expect("subagent thread should be running")
4635 .clone()
4636 });
4637
4638 let subagent_thread = agent.read_with(cx, |agent, _cx| {
4639 agent
4640 .sessions
4641 .get(&subagent_session_id)
4642 .expect("subagent session should exist")
4643 .acp_thread
4644 .clone()
4645 });
4646
4647 model.send_last_completion_stream_text_chunk("subagent task response");
4648 model.end_last_completion_stream();
4649
4650 cx.run_until_parked();
4651
4652 assert_eq!(
4653 subagent_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
4654 indoc! {"
4655 ## User
4656
4657 subagent task prompt
4658
4659 ## Assistant
4660
4661 subagent task response
4662
4663 "}
4664 );
4665
4666 model.send_last_completion_stream_text_chunk("Response");
4667 model.end_last_completion_stream();
4668
4669 send.await.unwrap();
4670
4671 assert_eq!(
4672 acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
4673 indoc! {r#"
4674 ## User
4675
4676 Prompt
4677
4678 ## Assistant
4679
4680 spawning subagent
4681
4682 **Tool Call: label**
4683 Status: Completed
4684
4685 subagent task response
4686
4687 ## Assistant
4688
4689 Response
4690
4691 "#},
4692 );
4693}
4694
4695#[gpui::test]
4696async fn test_subagent_tool_output_does_not_include_thinking(cx: &mut TestAppContext) {
4697 init_test(cx);
4698 cx.update(|cx| {
4699 LanguageModelRegistry::test(cx);
4700 });
4701 cx.update(|cx| {
4702 cx.update_flags(true, vec!["subagents".to_string()]);
4703 });
4704
4705 let fs = FakeFs::new(cx.executor());
4706 fs.insert_tree(
4707 "/",
4708 json!({
4709 "a": {
4710 "b.md": "Lorem"
4711 }
4712 }),
4713 )
4714 .await;
4715 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
4716 let thread_store = cx.new(|cx| ThreadStore::new(cx));
4717 let agent = cx.update(|cx| {
4718 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
4719 });
4720 let connection = Rc::new(NativeAgentConnection(agent.clone()));
4721
4722 let acp_thread = cx
4723 .update(|cx| {
4724 connection
4725 .clone()
4726 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
4727 })
4728 .await
4729 .unwrap();
4730 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
4731 let thread = agent.read_with(cx, |agent, _| {
4732 agent.sessions.get(&session_id).unwrap().thread.clone()
4733 });
4734 let model = Arc::new(FakeLanguageModel::default());
4735
4736 // Ensure empty threads are not saved, even if they get mutated.
4737 thread.update(cx, |thread, cx| {
4738 thread.set_model(model.clone(), cx);
4739 });
4740 cx.run_until_parked();
4741
4742 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
4743 cx.run_until_parked();
4744 model.send_last_completion_stream_text_chunk("spawning subagent");
4745 let subagent_tool_input = SpawnAgentToolInput {
4746 label: "label".to_string(),
4747 message: "subagent task prompt".to_string(),
4748 session_id: None,
4749 };
4750 let subagent_tool_use = LanguageModelToolUse {
4751 id: "subagent_1".into(),
4752 name: SpawnAgentTool::NAME.into(),
4753 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
4754 input: serde_json::to_value(&subagent_tool_input).unwrap(),
4755 is_input_complete: true,
4756 thought_signature: None,
4757 };
4758 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
4759 subagent_tool_use,
4760 ));
4761 model.end_last_completion_stream();
4762
4763 cx.run_until_parked();
4764
4765 let subagent_session_id = thread.read_with(cx, |thread, cx| {
4766 thread
4767 .running_subagent_ids(cx)
4768 .get(0)
4769 .expect("subagent thread should be running")
4770 .clone()
4771 });
4772
4773 let subagent_thread = agent.read_with(cx, |agent, _cx| {
4774 agent
4775 .sessions
4776 .get(&subagent_session_id)
4777 .expect("subagent session should exist")
4778 .acp_thread
4779 .clone()
4780 });
4781
4782 model.send_last_completion_stream_text_chunk("subagent task response 1");
4783 model.send_last_completion_stream_event(LanguageModelCompletionEvent::Thinking {
4784 text: "thinking more about the subagent task".into(),
4785 signature: None,
4786 });
4787 model.send_last_completion_stream_text_chunk("subagent task response 2");
4788 model.end_last_completion_stream();
4789
4790 cx.run_until_parked();
4791
4792 assert_eq!(
4793 subagent_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
4794 indoc! {"
4795 ## User
4796
4797 subagent task prompt
4798
4799 ## Assistant
4800
4801 subagent task response 1
4802
4803 <thinking>
4804 thinking more about the subagent task
4805 </thinking>
4806
4807 subagent task response 2
4808
4809 "}
4810 );
4811
4812 model.send_last_completion_stream_text_chunk("Response");
4813 model.end_last_completion_stream();
4814
4815 send.await.unwrap();
4816
4817 assert_eq!(
4818 acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
4819 indoc! {r#"
4820 ## User
4821
4822 Prompt
4823
4824 ## Assistant
4825
4826 spawning subagent
4827
4828 **Tool Call: label**
4829 Status: Completed
4830
4831 subagent task response 1
4832
4833 subagent task response 2
4834
4835 ## Assistant
4836
4837 Response
4838
4839 "#},
4840 );
4841}
4842
4843#[gpui::test]
4844async fn test_subagent_tool_call_cancellation_during_task_prompt(cx: &mut TestAppContext) {
4845 init_test(cx);
4846 cx.update(|cx| {
4847 LanguageModelRegistry::test(cx);
4848 });
4849 cx.update(|cx| {
4850 cx.update_flags(true, vec!["subagents".to_string()]);
4851 });
4852
4853 let fs = FakeFs::new(cx.executor());
4854 fs.insert_tree(
4855 "/",
4856 json!({
4857 "a": {
4858 "b.md": "Lorem"
4859 }
4860 }),
4861 )
4862 .await;
4863 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
4864 let thread_store = cx.new(|cx| ThreadStore::new(cx));
4865 let agent = cx.update(|cx| {
4866 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
4867 });
4868 let connection = Rc::new(NativeAgentConnection(agent.clone()));
4869
4870 let acp_thread = cx
4871 .update(|cx| {
4872 connection
4873 .clone()
4874 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
4875 })
4876 .await
4877 .unwrap();
4878 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
4879 let thread = agent.read_with(cx, |agent, _| {
4880 agent.sessions.get(&session_id).unwrap().thread.clone()
4881 });
4882 let model = Arc::new(FakeLanguageModel::default());
4883
4884 // Ensure empty threads are not saved, even if they get mutated.
4885 thread.update(cx, |thread, cx| {
4886 thread.set_model(model.clone(), cx);
4887 });
4888 cx.run_until_parked();
4889
4890 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
4891 cx.run_until_parked();
4892 model.send_last_completion_stream_text_chunk("spawning subagent");
4893 let subagent_tool_input = SpawnAgentToolInput {
4894 label: "label".to_string(),
4895 message: "subagent task prompt".to_string(),
4896 session_id: None,
4897 };
4898 let subagent_tool_use = LanguageModelToolUse {
4899 id: "subagent_1".into(),
4900 name: SpawnAgentTool::NAME.into(),
4901 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
4902 input: serde_json::to_value(&subagent_tool_input).unwrap(),
4903 is_input_complete: true,
4904 thought_signature: None,
4905 };
4906 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
4907 subagent_tool_use,
4908 ));
4909 model.end_last_completion_stream();
4910
4911 cx.run_until_parked();
4912
4913 let subagent_session_id = thread.read_with(cx, |thread, cx| {
4914 thread
4915 .running_subagent_ids(cx)
4916 .get(0)
4917 .expect("subagent thread should be running")
4918 .clone()
4919 });
4920 let subagent_acp_thread = agent.read_with(cx, |agent, _cx| {
4921 agent
4922 .sessions
4923 .get(&subagent_session_id)
4924 .expect("subagent session should exist")
4925 .acp_thread
4926 .clone()
4927 });
4928
4929 // model.send_last_completion_stream_text_chunk("subagent task response");
4930 // model.end_last_completion_stream();
4931
4932 // cx.run_until_parked();
4933
4934 acp_thread.update(cx, |thread, cx| thread.cancel(cx)).await;
4935
4936 cx.run_until_parked();
4937
4938 send.await.unwrap();
4939
4940 acp_thread.read_with(cx, |thread, cx| {
4941 assert_eq!(thread.status(), ThreadStatus::Idle);
4942 assert_eq!(
4943 thread.to_markdown(cx),
4944 indoc! {"
4945 ## User
4946
4947 Prompt
4948
4949 ## Assistant
4950
4951 spawning subagent
4952
4953 **Tool Call: label**
4954 Status: Canceled
4955
4956 "}
4957 );
4958 });
4959 subagent_acp_thread.read_with(cx, |thread, cx| {
4960 assert_eq!(thread.status(), ThreadStatus::Idle);
4961 assert_eq!(
4962 thread.to_markdown(cx),
4963 indoc! {"
4964 ## User
4965
4966 subagent task prompt
4967
4968 "}
4969 );
4970 });
4971}
4972
4973#[gpui::test]
4974async fn test_subagent_tool_resume_session(cx: &mut TestAppContext) {
4975 init_test(cx);
4976 cx.update(|cx| {
4977 LanguageModelRegistry::test(cx);
4978 });
4979 cx.update(|cx| {
4980 cx.update_flags(true, vec!["subagents".to_string()]);
4981 });
4982
4983 let fs = FakeFs::new(cx.executor());
4984 fs.insert_tree(
4985 "/",
4986 json!({
4987 "a": {
4988 "b.md": "Lorem"
4989 }
4990 }),
4991 )
4992 .await;
4993 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
4994 let thread_store = cx.new(|cx| ThreadStore::new(cx));
4995 let agent = cx.update(|cx| {
4996 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
4997 });
4998 let connection = Rc::new(NativeAgentConnection(agent.clone()));
4999
5000 let acp_thread = cx
5001 .update(|cx| {
5002 connection
5003 .clone()
5004 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
5005 })
5006 .await
5007 .unwrap();
5008 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
5009 let thread = agent.read_with(cx, |agent, _| {
5010 agent.sessions.get(&session_id).unwrap().thread.clone()
5011 });
5012 let model = Arc::new(FakeLanguageModel::default());
5013
5014 thread.update(cx, |thread, cx| {
5015 thread.set_model(model.clone(), cx);
5016 });
5017 cx.run_until_parked();
5018
5019 // === First turn: create subagent ===
5020 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("First prompt", cx));
5021 cx.run_until_parked();
5022 model.send_last_completion_stream_text_chunk("spawning subagent");
5023 let subagent_tool_input = SpawnAgentToolInput {
5024 label: "initial task".to_string(),
5025 message: "do the first task".to_string(),
5026 session_id: None,
5027 };
5028 let subagent_tool_use = LanguageModelToolUse {
5029 id: "subagent_1".into(),
5030 name: SpawnAgentTool::NAME.into(),
5031 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
5032 input: serde_json::to_value(&subagent_tool_input).unwrap(),
5033 is_input_complete: true,
5034 thought_signature: None,
5035 };
5036 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
5037 subagent_tool_use,
5038 ));
5039 model.end_last_completion_stream();
5040
5041 cx.run_until_parked();
5042
5043 let subagent_session_id = thread.read_with(cx, |thread, cx| {
5044 thread
5045 .running_subagent_ids(cx)
5046 .get(0)
5047 .expect("subagent thread should be running")
5048 .clone()
5049 });
5050
5051 let subagent_acp_thread = agent.read_with(cx, |agent, _cx| {
5052 agent
5053 .sessions
5054 .get(&subagent_session_id)
5055 .expect("subagent session should exist")
5056 .acp_thread
5057 .clone()
5058 });
5059
5060 // Subagent responds
5061 model.send_last_completion_stream_text_chunk("first task response");
5062 model.end_last_completion_stream();
5063
5064 cx.run_until_parked();
5065
5066 // Parent model responds to complete first turn
5067 model.send_last_completion_stream_text_chunk("First response");
5068 model.end_last_completion_stream();
5069
5070 send.await.unwrap();
5071
5072 // Verify subagent is no longer running
5073 thread.read_with(cx, |thread, cx| {
5074 assert!(
5075 thread.running_subagent_ids(cx).is_empty(),
5076 "subagent should not be running after completion"
5077 );
5078 });
5079
5080 // === Second turn: resume subagent with session_id ===
5081 let send2 = acp_thread.update(cx, |thread, cx| thread.send_raw("Follow up", cx));
5082 cx.run_until_parked();
5083 model.send_last_completion_stream_text_chunk("resuming subagent");
5084 let resume_tool_input = SpawnAgentToolInput {
5085 label: "follow-up task".to_string(),
5086 message: "do the follow-up task".to_string(),
5087 session_id: Some(subagent_session_id.clone()),
5088 };
5089 let resume_tool_use = LanguageModelToolUse {
5090 id: "subagent_2".into(),
5091 name: SpawnAgentTool::NAME.into(),
5092 raw_input: serde_json::to_string(&resume_tool_input).unwrap(),
5093 input: serde_json::to_value(&resume_tool_input).unwrap(),
5094 is_input_complete: true,
5095 thought_signature: None,
5096 };
5097 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(resume_tool_use));
5098 model.end_last_completion_stream();
5099
5100 cx.run_until_parked();
5101
5102 // Subagent should be running again with the same session
5103 thread.read_with(cx, |thread, cx| {
5104 let running = thread.running_subagent_ids(cx);
5105 assert_eq!(running.len(), 1, "subagent should be running");
5106 assert_eq!(running[0], subagent_session_id, "should be same session");
5107 });
5108
5109 // Subagent responds to follow-up
5110 model.send_last_completion_stream_text_chunk("follow-up task response");
5111 model.end_last_completion_stream();
5112
5113 cx.run_until_parked();
5114
5115 // Parent model responds to complete second turn
5116 model.send_last_completion_stream_text_chunk("Second response");
5117 model.end_last_completion_stream();
5118
5119 send2.await.unwrap();
5120
5121 // Verify subagent is no longer running
5122 thread.read_with(cx, |thread, cx| {
5123 assert!(
5124 thread.running_subagent_ids(cx).is_empty(),
5125 "subagent should not be running after resume completion"
5126 );
5127 });
5128
5129 // Verify the subagent's acp thread has both conversation turns
5130 assert_eq!(
5131 subagent_acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
5132 indoc! {"
5133 ## User
5134
5135 do the first task
5136
5137 ## Assistant
5138
5139 first task response
5140
5141 ## User
5142
5143 do the follow-up task
5144
5145 ## Assistant
5146
5147 follow-up task response
5148
5149 "}
5150 );
5151}
5152
5153#[gpui::test]
5154async fn test_subagent_thread_inherits_parent_thread_properties(cx: &mut TestAppContext) {
5155 init_test(cx);
5156
5157 cx.update(|cx| {
5158 cx.update_flags(true, vec!["subagents".to_string()]);
5159 });
5160
5161 let fs = FakeFs::new(cx.executor());
5162 fs.insert_tree(path!("/test"), json!({})).await;
5163 let project = Project::test(fs, [path!("/test").as_ref()], cx).await;
5164 let project_context = cx.new(|_cx| ProjectContext::default());
5165 let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
5166 let context_server_registry =
5167 cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
5168 let model = Arc::new(FakeLanguageModel::default());
5169
5170 let parent_thread = cx.new(|cx| {
5171 Thread::new(
5172 project.clone(),
5173 project_context,
5174 context_server_registry,
5175 Templates::new(),
5176 Some(model.clone()),
5177 cx,
5178 )
5179 });
5180
5181 let subagent_thread = cx.new(|cx| Thread::new_subagent(&parent_thread, cx));
5182 subagent_thread.read_with(cx, |subagent_thread, cx| {
5183 assert!(subagent_thread.is_subagent());
5184 assert_eq!(subagent_thread.depth(), 1);
5185 assert_eq!(
5186 subagent_thread.model().map(|model| model.id()),
5187 Some(model.id())
5188 );
5189 assert_eq!(
5190 subagent_thread.parent_thread_id(),
5191 Some(parent_thread.read(cx).id().clone())
5192 );
5193
5194 let request = subagent_thread
5195 .build_completion_request(CompletionIntent::UserPrompt, cx)
5196 .unwrap();
5197 assert_eq!(request.intent, Some(CompletionIntent::Subagent));
5198 });
5199}
5200
5201#[gpui::test]
5202async fn test_max_subagent_depth_prevents_tool_registration(cx: &mut TestAppContext) {
5203 init_test(cx);
5204
5205 cx.update(|cx| {
5206 cx.update_flags(true, vec!["subagents".to_string()]);
5207 });
5208
5209 let fs = FakeFs::new(cx.executor());
5210 fs.insert_tree(path!("/test"), json!({})).await;
5211 let project = Project::test(fs, [path!("/test").as_ref()], cx).await;
5212 let project_context = cx.new(|_cx| ProjectContext::default());
5213 let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
5214 let context_server_registry =
5215 cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
5216 let model = Arc::new(FakeLanguageModel::default());
5217 let environment = Rc::new(cx.update(|cx| {
5218 FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
5219 }));
5220
5221 let deep_parent_thread = cx.new(|cx| {
5222 let mut thread = Thread::new(
5223 project.clone(),
5224 project_context,
5225 context_server_registry,
5226 Templates::new(),
5227 Some(model.clone()),
5228 cx,
5229 );
5230 thread.set_subagent_context(SubagentContext {
5231 parent_thread_id: agent_client_protocol::SessionId::new("parent-id"),
5232 depth: MAX_SUBAGENT_DEPTH - 1,
5233 });
5234 thread
5235 });
5236 let deep_subagent_thread = cx.new(|cx| {
5237 let mut thread = Thread::new_subagent(&deep_parent_thread, cx);
5238 thread.add_default_tools(environment, cx);
5239 thread
5240 });
5241
5242 deep_subagent_thread.read_with(cx, |thread, _| {
5243 assert_eq!(thread.depth(), MAX_SUBAGENT_DEPTH);
5244 assert!(
5245 !thread.has_registered_tool(SpawnAgentTool::NAME),
5246 "subagent tool should not be present at max depth"
5247 );
5248 });
5249}
5250
5251#[gpui::test]
5252async fn test_parent_cancel_stops_subagent(cx: &mut TestAppContext) {
5253 init_test(cx);
5254
5255 cx.update(|cx| {
5256 cx.update_flags(true, vec!["subagents".to_string()]);
5257 });
5258
5259 let fs = FakeFs::new(cx.executor());
5260 fs.insert_tree(path!("/test"), json!({})).await;
5261 let project = Project::test(fs, [path!("/test").as_ref()], cx).await;
5262 let project_context = cx.new(|_cx| ProjectContext::default());
5263 let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
5264 let context_server_registry =
5265 cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
5266 let model = Arc::new(FakeLanguageModel::default());
5267
5268 let parent = cx.new(|cx| {
5269 Thread::new(
5270 project.clone(),
5271 project_context.clone(),
5272 context_server_registry.clone(),
5273 Templates::new(),
5274 Some(model.clone()),
5275 cx,
5276 )
5277 });
5278
5279 let subagent = cx.new(|cx| Thread::new_subagent(&parent, cx));
5280
5281 parent.update(cx, |thread, _cx| {
5282 thread.register_running_subagent(subagent.downgrade());
5283 });
5284
5285 subagent
5286 .update(cx, |thread, cx| {
5287 thread.send(UserMessageId::new(), ["Do work".to_string()], cx)
5288 })
5289 .unwrap();
5290 cx.run_until_parked();
5291
5292 subagent.read_with(cx, |thread, _| {
5293 assert!(!thread.is_turn_complete(), "subagent should be running");
5294 });
5295
5296 parent.update(cx, |thread, cx| {
5297 thread.cancel(cx).detach();
5298 });
5299
5300 subagent.read_with(cx, |thread, _| {
5301 assert!(
5302 thread.is_turn_complete(),
5303 "subagent should be cancelled when parent cancels"
5304 );
5305 });
5306}
5307
5308#[gpui::test]
5309async fn test_subagent_context_window_warning(cx: &mut TestAppContext) {
5310 init_test(cx);
5311 cx.update(|cx| {
5312 LanguageModelRegistry::test(cx);
5313 });
5314 cx.update(|cx| {
5315 cx.update_flags(true, vec!["subagents".to_string()]);
5316 });
5317
5318 let fs = FakeFs::new(cx.executor());
5319 fs.insert_tree(
5320 "/",
5321 json!({
5322 "a": {
5323 "b.md": "Lorem"
5324 }
5325 }),
5326 )
5327 .await;
5328 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
5329 let thread_store = cx.new(|cx| ThreadStore::new(cx));
5330 let agent = cx.update(|cx| {
5331 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
5332 });
5333 let connection = Rc::new(NativeAgentConnection(agent.clone()));
5334
5335 let acp_thread = cx
5336 .update(|cx| {
5337 connection
5338 .clone()
5339 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
5340 })
5341 .await
5342 .unwrap();
5343 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
5344 let thread = agent.read_with(cx, |agent, _| {
5345 agent.sessions.get(&session_id).unwrap().thread.clone()
5346 });
5347 let model = Arc::new(FakeLanguageModel::default());
5348
5349 thread.update(cx, |thread, cx| {
5350 thread.set_model(model.clone(), cx);
5351 });
5352 cx.run_until_parked();
5353
5354 // Start the parent turn
5355 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
5356 cx.run_until_parked();
5357 model.send_last_completion_stream_text_chunk("spawning subagent");
5358 let subagent_tool_input = SpawnAgentToolInput {
5359 label: "label".to_string(),
5360 message: "subagent task prompt".to_string(),
5361 session_id: None,
5362 };
5363 let subagent_tool_use = LanguageModelToolUse {
5364 id: "subagent_1".into(),
5365 name: SpawnAgentTool::NAME.into(),
5366 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
5367 input: serde_json::to_value(&subagent_tool_input).unwrap(),
5368 is_input_complete: true,
5369 thought_signature: None,
5370 };
5371 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
5372 subagent_tool_use,
5373 ));
5374 model.end_last_completion_stream();
5375
5376 cx.run_until_parked();
5377
5378 // Verify subagent is running
5379 let subagent_session_id = thread.read_with(cx, |thread, cx| {
5380 thread
5381 .running_subagent_ids(cx)
5382 .get(0)
5383 .expect("subagent thread should be running")
5384 .clone()
5385 });
5386
5387 // Send a usage update that crosses the warning threshold (80% of 1,000,000)
5388 model.send_last_completion_stream_text_chunk("partial work");
5389 model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
5390 TokenUsage {
5391 input_tokens: 850_000,
5392 output_tokens: 0,
5393 cache_creation_input_tokens: 0,
5394 cache_read_input_tokens: 0,
5395 },
5396 ));
5397
5398 cx.run_until_parked();
5399
5400 // The subagent should no longer be running
5401 thread.read_with(cx, |thread, cx| {
5402 assert!(
5403 thread.running_subagent_ids(cx).is_empty(),
5404 "subagent should be stopped after context window warning"
5405 );
5406 });
5407
5408 // The parent model should get a new completion request to respond to the tool error
5409 model.send_last_completion_stream_text_chunk("Response after warning");
5410 model.end_last_completion_stream();
5411
5412 send.await.unwrap();
5413
5414 // Verify the parent thread shows the warning error in the tool call
5415 let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
5416 assert!(
5417 markdown.contains("nearing the end of its context window"),
5418 "tool output should contain context window warning message, got:\n{markdown}"
5419 );
5420 assert!(
5421 markdown.contains("Status: Failed"),
5422 "tool call should have Failed status, got:\n{markdown}"
5423 );
5424
5425 // Verify the subagent session still exists (can be resumed)
5426 agent.read_with(cx, |agent, _cx| {
5427 assert!(
5428 agent.sessions.contains_key(&subagent_session_id),
5429 "subagent session should still exist for potential resume"
5430 );
5431 });
5432}
5433
5434#[gpui::test]
5435async fn test_subagent_no_context_window_warning_when_already_at_warning(cx: &mut TestAppContext) {
5436 init_test(cx);
5437 cx.update(|cx| {
5438 LanguageModelRegistry::test(cx);
5439 });
5440 cx.update(|cx| {
5441 cx.update_flags(true, vec!["subagents".to_string()]);
5442 });
5443
5444 let fs = FakeFs::new(cx.executor());
5445 fs.insert_tree(
5446 "/",
5447 json!({
5448 "a": {
5449 "b.md": "Lorem"
5450 }
5451 }),
5452 )
5453 .await;
5454 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
5455 let thread_store = cx.new(|cx| ThreadStore::new(cx));
5456 let agent = cx.update(|cx| {
5457 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
5458 });
5459 let connection = Rc::new(NativeAgentConnection(agent.clone()));
5460
5461 let acp_thread = cx
5462 .update(|cx| {
5463 connection
5464 .clone()
5465 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
5466 })
5467 .await
5468 .unwrap();
5469 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
5470 let thread = agent.read_with(cx, |agent, _| {
5471 agent.sessions.get(&session_id).unwrap().thread.clone()
5472 });
5473 let model = Arc::new(FakeLanguageModel::default());
5474
5475 thread.update(cx, |thread, cx| {
5476 thread.set_model(model.clone(), cx);
5477 });
5478 cx.run_until_parked();
5479
5480 // === First turn: create subagent, trigger context window warning ===
5481 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("First prompt", cx));
5482 cx.run_until_parked();
5483 model.send_last_completion_stream_text_chunk("spawning subagent");
5484 let subagent_tool_input = SpawnAgentToolInput {
5485 label: "initial task".to_string(),
5486 message: "do the first task".to_string(),
5487 session_id: None,
5488 };
5489 let subagent_tool_use = LanguageModelToolUse {
5490 id: "subagent_1".into(),
5491 name: SpawnAgentTool::NAME.into(),
5492 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
5493 input: serde_json::to_value(&subagent_tool_input).unwrap(),
5494 is_input_complete: true,
5495 thought_signature: None,
5496 };
5497 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
5498 subagent_tool_use,
5499 ));
5500 model.end_last_completion_stream();
5501
5502 cx.run_until_parked();
5503
5504 let subagent_session_id = thread.read_with(cx, |thread, cx| {
5505 thread
5506 .running_subagent_ids(cx)
5507 .get(0)
5508 .expect("subagent thread should be running")
5509 .clone()
5510 });
5511
5512 // Subagent sends a usage update that crosses the warning threshold.
5513 // This triggers Normal→Warning, stopping the subagent.
5514 model.send_last_completion_stream_text_chunk("partial work");
5515 model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
5516 TokenUsage {
5517 input_tokens: 850_000,
5518 output_tokens: 0,
5519 cache_creation_input_tokens: 0,
5520 cache_read_input_tokens: 0,
5521 },
5522 ));
5523
5524 cx.run_until_parked();
5525
5526 // Verify the first turn was stopped with a context window warning
5527 thread.read_with(cx, |thread, cx| {
5528 assert!(
5529 thread.running_subagent_ids(cx).is_empty(),
5530 "subagent should be stopped after context window warning"
5531 );
5532 });
5533
5534 // Parent model responds to complete first turn
5535 model.send_last_completion_stream_text_chunk("First response");
5536 model.end_last_completion_stream();
5537
5538 send.await.unwrap();
5539
5540 let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
5541 assert!(
5542 markdown.contains("nearing the end of its context window"),
5543 "first turn should have context window warning, got:\n{markdown}"
5544 );
5545
5546 // === Second turn: resume the same subagent (now at Warning level) ===
5547 let send2 = acp_thread.update(cx, |thread, cx| thread.send_raw("Follow up", cx));
5548 cx.run_until_parked();
5549 model.send_last_completion_stream_text_chunk("resuming subagent");
5550 let resume_tool_input = SpawnAgentToolInput {
5551 label: "follow-up task".to_string(),
5552 message: "do the follow-up task".to_string(),
5553 session_id: Some(subagent_session_id.clone()),
5554 };
5555 let resume_tool_use = LanguageModelToolUse {
5556 id: "subagent_2".into(),
5557 name: SpawnAgentTool::NAME.into(),
5558 raw_input: serde_json::to_string(&resume_tool_input).unwrap(),
5559 input: serde_json::to_value(&resume_tool_input).unwrap(),
5560 is_input_complete: true,
5561 thought_signature: None,
5562 };
5563 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(resume_tool_use));
5564 model.end_last_completion_stream();
5565
5566 cx.run_until_parked();
5567
5568 // Subagent responds with tokens still at warning level (no worse).
5569 // Since ratio_before_prompt was already Warning, this should NOT
5570 // trigger the context window warning again.
5571 model.send_last_completion_stream_text_chunk("follow-up task response");
5572 model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
5573 TokenUsage {
5574 input_tokens: 870_000,
5575 output_tokens: 0,
5576 cache_creation_input_tokens: 0,
5577 cache_read_input_tokens: 0,
5578 },
5579 ));
5580 model.end_last_completion_stream();
5581
5582 cx.run_until_parked();
5583
5584 // Parent model responds to complete second turn
5585 model.send_last_completion_stream_text_chunk("Second response");
5586 model.end_last_completion_stream();
5587
5588 send2.await.unwrap();
5589
5590 // The resumed subagent should have completed normally since the ratio
5591 // didn't transition (it was Warning before and stayed at Warning)
5592 let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
5593 assert!(
5594 markdown.contains("follow-up task response"),
5595 "resumed subagent should complete normally when already at warning, got:\n{markdown}"
5596 );
5597 // The second tool call should NOT have a context window warning
5598 let second_tool_pos = markdown
5599 .find("follow-up task")
5600 .expect("should find follow-up tool call");
5601 let after_second_tool = &markdown[second_tool_pos..];
5602 assert!(
5603 !after_second_tool.contains("nearing the end of its context window"),
5604 "should NOT contain context window warning for resumed subagent at same level, got:\n{after_second_tool}"
5605 );
5606}
5607
5608#[gpui::test]
5609async fn test_subagent_error_propagation(cx: &mut TestAppContext) {
5610 init_test(cx);
5611 cx.update(|cx| {
5612 LanguageModelRegistry::test(cx);
5613 });
5614 cx.update(|cx| {
5615 cx.update_flags(true, vec!["subagents".to_string()]);
5616 });
5617
5618 let fs = FakeFs::new(cx.executor());
5619 fs.insert_tree(
5620 "/",
5621 json!({
5622 "a": {
5623 "b.md": "Lorem"
5624 }
5625 }),
5626 )
5627 .await;
5628 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
5629 let thread_store = cx.new(|cx| ThreadStore::new(cx));
5630 let agent = cx.update(|cx| {
5631 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
5632 });
5633 let connection = Rc::new(NativeAgentConnection(agent.clone()));
5634
5635 let acp_thread = cx
5636 .update(|cx| {
5637 connection
5638 .clone()
5639 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
5640 })
5641 .await
5642 .unwrap();
5643 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
5644 let thread = agent.read_with(cx, |agent, _| {
5645 agent.sessions.get(&session_id).unwrap().thread.clone()
5646 });
5647 let model = Arc::new(FakeLanguageModel::default());
5648
5649 thread.update(cx, |thread, cx| {
5650 thread.set_model(model.clone(), cx);
5651 });
5652 cx.run_until_parked();
5653
5654 // Start the parent turn
5655 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
5656 cx.run_until_parked();
5657 model.send_last_completion_stream_text_chunk("spawning subagent");
5658 let subagent_tool_input = SpawnAgentToolInput {
5659 label: "label".to_string(),
5660 message: "subagent task prompt".to_string(),
5661 session_id: None,
5662 };
5663 let subagent_tool_use = LanguageModelToolUse {
5664 id: "subagent_1".into(),
5665 name: SpawnAgentTool::NAME.into(),
5666 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
5667 input: serde_json::to_value(&subagent_tool_input).unwrap(),
5668 is_input_complete: true,
5669 thought_signature: None,
5670 };
5671 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
5672 subagent_tool_use,
5673 ));
5674 model.end_last_completion_stream();
5675
5676 cx.run_until_parked();
5677
5678 // Verify subagent is running
5679 thread.read_with(cx, |thread, cx| {
5680 assert!(
5681 !thread.running_subagent_ids(cx).is_empty(),
5682 "subagent should be running"
5683 );
5684 });
5685
5686 // The subagent's model returns a non-retryable error
5687 model.send_last_completion_stream_error(LanguageModelCompletionError::PromptTooLarge {
5688 tokens: None,
5689 });
5690
5691 cx.run_until_parked();
5692
5693 // The subagent should no longer be running
5694 thread.read_with(cx, |thread, cx| {
5695 assert!(
5696 thread.running_subagent_ids(cx).is_empty(),
5697 "subagent should not be running after error"
5698 );
5699 });
5700
5701 // The parent model should get a new completion request to respond to the tool error
5702 model.send_last_completion_stream_text_chunk("Response after error");
5703 model.end_last_completion_stream();
5704
5705 send.await.unwrap();
5706
5707 // Verify the parent thread shows the error in the tool call
5708 let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
5709 assert!(
5710 markdown.contains("Status: Failed"),
5711 "tool call should have Failed status after model error, got:\n{markdown}"
5712 );
5713}
5714
5715#[gpui::test]
5716async fn test_edit_file_tool_deny_rule_blocks_edit(cx: &mut TestAppContext) {
5717 init_test(cx);
5718
5719 let fs = FakeFs::new(cx.executor());
5720 fs.insert_tree("/root", json!({"sensitive_config.txt": "secret data"}))
5721 .await;
5722 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
5723
5724 cx.update(|cx| {
5725 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
5726 settings.tool_permissions.tools.insert(
5727 EditFileTool::NAME.into(),
5728 agent_settings::ToolRules {
5729 default: Some(settings::ToolPermissionMode::Allow),
5730 always_allow: vec![],
5731 always_deny: vec![agent_settings::CompiledRegex::new(r"sensitive", false).unwrap()],
5732 always_confirm: vec![],
5733 invalid_patterns: vec![],
5734 },
5735 );
5736 agent_settings::AgentSettings::override_global(settings, cx);
5737 });
5738
5739 let context_server_registry =
5740 cx.new(|cx| crate::ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
5741 let language_registry = project.read_with(cx, |project, _cx| project.languages().clone());
5742 let templates = crate::Templates::new();
5743 let thread = cx.new(|cx| {
5744 crate::Thread::new(
5745 project.clone(),
5746 cx.new(|_cx| prompt_store::ProjectContext::default()),
5747 context_server_registry,
5748 templates.clone(),
5749 None,
5750 cx,
5751 )
5752 });
5753
5754 #[allow(clippy::arc_with_non_send_sync)]
5755 let tool = Arc::new(crate::EditFileTool::new(
5756 project.clone(),
5757 thread.downgrade(),
5758 language_registry,
5759 templates,
5760 ));
5761 let (event_stream, _rx) = crate::ToolCallEventStream::test();
5762
5763 let task = cx.update(|cx| {
5764 tool.run(
5765 ToolInput::resolved(crate::EditFileToolInput {
5766 display_description: "Edit sensitive file".to_string(),
5767 path: "root/sensitive_config.txt".into(),
5768 mode: crate::EditFileMode::Edit,
5769 }),
5770 event_stream,
5771 cx,
5772 )
5773 });
5774
5775 let result = task.await;
5776 assert!(result.is_err(), "expected edit to be blocked");
5777 assert!(
5778 result.unwrap_err().to_string().contains("blocked"),
5779 "error should mention the edit was blocked"
5780 );
5781}
5782
5783#[gpui::test]
5784async fn test_delete_path_tool_deny_rule_blocks_deletion(cx: &mut TestAppContext) {
5785 init_test(cx);
5786
5787 let fs = FakeFs::new(cx.executor());
5788 fs.insert_tree("/root", json!({"important_data.txt": "critical info"}))
5789 .await;
5790 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
5791
5792 cx.update(|cx| {
5793 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
5794 settings.tool_permissions.tools.insert(
5795 DeletePathTool::NAME.into(),
5796 agent_settings::ToolRules {
5797 default: Some(settings::ToolPermissionMode::Allow),
5798 always_allow: vec![],
5799 always_deny: vec![agent_settings::CompiledRegex::new(r"important", false).unwrap()],
5800 always_confirm: vec![],
5801 invalid_patterns: vec![],
5802 },
5803 );
5804 agent_settings::AgentSettings::override_global(settings, cx);
5805 });
5806
5807 let action_log = cx.new(|_cx| action_log::ActionLog::new(project.clone()));
5808
5809 #[allow(clippy::arc_with_non_send_sync)]
5810 let tool = Arc::new(crate::DeletePathTool::new(project, action_log));
5811 let (event_stream, _rx) = crate::ToolCallEventStream::test();
5812
5813 let task = cx.update(|cx| {
5814 tool.run(
5815 ToolInput::resolved(crate::DeletePathToolInput {
5816 path: "root/important_data.txt".to_string(),
5817 }),
5818 event_stream,
5819 cx,
5820 )
5821 });
5822
5823 let result = task.await;
5824 assert!(result.is_err(), "expected deletion to be blocked");
5825 assert!(
5826 result.unwrap_err().contains("blocked"),
5827 "error should mention the deletion was blocked"
5828 );
5829}
5830
5831#[gpui::test]
5832async fn test_move_path_tool_denies_if_destination_denied(cx: &mut TestAppContext) {
5833 init_test(cx);
5834
5835 let fs = FakeFs::new(cx.executor());
5836 fs.insert_tree(
5837 "/root",
5838 json!({
5839 "safe.txt": "content",
5840 "protected": {}
5841 }),
5842 )
5843 .await;
5844 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
5845
5846 cx.update(|cx| {
5847 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
5848 settings.tool_permissions.tools.insert(
5849 MovePathTool::NAME.into(),
5850 agent_settings::ToolRules {
5851 default: Some(settings::ToolPermissionMode::Allow),
5852 always_allow: vec![],
5853 always_deny: vec![agent_settings::CompiledRegex::new(r"protected", false).unwrap()],
5854 always_confirm: vec![],
5855 invalid_patterns: vec![],
5856 },
5857 );
5858 agent_settings::AgentSettings::override_global(settings, cx);
5859 });
5860
5861 #[allow(clippy::arc_with_non_send_sync)]
5862 let tool = Arc::new(crate::MovePathTool::new(project));
5863 let (event_stream, _rx) = crate::ToolCallEventStream::test();
5864
5865 let task = cx.update(|cx| {
5866 tool.run(
5867 ToolInput::resolved(crate::MovePathToolInput {
5868 source_path: "root/safe.txt".to_string(),
5869 destination_path: "root/protected/safe.txt".to_string(),
5870 }),
5871 event_stream,
5872 cx,
5873 )
5874 });
5875
5876 let result = task.await;
5877 assert!(
5878 result.is_err(),
5879 "expected move to be blocked due to destination path"
5880 );
5881 assert!(
5882 result.unwrap_err().contains("blocked"),
5883 "error should mention the move was blocked"
5884 );
5885}
5886
5887#[gpui::test]
5888async fn test_move_path_tool_denies_if_source_denied(cx: &mut TestAppContext) {
5889 init_test(cx);
5890
5891 let fs = FakeFs::new(cx.executor());
5892 fs.insert_tree(
5893 "/root",
5894 json!({
5895 "secret.txt": "secret content",
5896 "public": {}
5897 }),
5898 )
5899 .await;
5900 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
5901
5902 cx.update(|cx| {
5903 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
5904 settings.tool_permissions.tools.insert(
5905 MovePathTool::NAME.into(),
5906 agent_settings::ToolRules {
5907 default: Some(settings::ToolPermissionMode::Allow),
5908 always_allow: vec![],
5909 always_deny: vec![agent_settings::CompiledRegex::new(r"secret", false).unwrap()],
5910 always_confirm: vec![],
5911 invalid_patterns: vec![],
5912 },
5913 );
5914 agent_settings::AgentSettings::override_global(settings, cx);
5915 });
5916
5917 #[allow(clippy::arc_with_non_send_sync)]
5918 let tool = Arc::new(crate::MovePathTool::new(project));
5919 let (event_stream, _rx) = crate::ToolCallEventStream::test();
5920
5921 let task = cx.update(|cx| {
5922 tool.run(
5923 ToolInput::resolved(crate::MovePathToolInput {
5924 source_path: "root/secret.txt".to_string(),
5925 destination_path: "root/public/not_secret.txt".to_string(),
5926 }),
5927 event_stream,
5928 cx,
5929 )
5930 });
5931
5932 let result = task.await;
5933 assert!(
5934 result.is_err(),
5935 "expected move to be blocked due to source path"
5936 );
5937 assert!(
5938 result.unwrap_err().contains("blocked"),
5939 "error should mention the move was blocked"
5940 );
5941}
5942
5943#[gpui::test]
5944async fn test_copy_path_tool_deny_rule_blocks_copy(cx: &mut TestAppContext) {
5945 init_test(cx);
5946
5947 let fs = FakeFs::new(cx.executor());
5948 fs.insert_tree(
5949 "/root",
5950 json!({
5951 "confidential.txt": "confidential data",
5952 "dest": {}
5953 }),
5954 )
5955 .await;
5956 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
5957
5958 cx.update(|cx| {
5959 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
5960 settings.tool_permissions.tools.insert(
5961 CopyPathTool::NAME.into(),
5962 agent_settings::ToolRules {
5963 default: Some(settings::ToolPermissionMode::Allow),
5964 always_allow: vec![],
5965 always_deny: vec![
5966 agent_settings::CompiledRegex::new(r"confidential", false).unwrap(),
5967 ],
5968 always_confirm: vec![],
5969 invalid_patterns: vec![],
5970 },
5971 );
5972 agent_settings::AgentSettings::override_global(settings, cx);
5973 });
5974
5975 #[allow(clippy::arc_with_non_send_sync)]
5976 let tool = Arc::new(crate::CopyPathTool::new(project));
5977 let (event_stream, _rx) = crate::ToolCallEventStream::test();
5978
5979 let task = cx.update(|cx| {
5980 tool.run(
5981 ToolInput::resolved(crate::CopyPathToolInput {
5982 source_path: "root/confidential.txt".to_string(),
5983 destination_path: "root/dest/copy.txt".to_string(),
5984 }),
5985 event_stream,
5986 cx,
5987 )
5988 });
5989
5990 let result = task.await;
5991 assert!(result.is_err(), "expected copy to be blocked");
5992 assert!(
5993 result.unwrap_err().contains("blocked"),
5994 "error should mention the copy was blocked"
5995 );
5996}
5997
5998#[gpui::test]
5999async fn test_save_file_tool_denies_if_any_path_denied(cx: &mut TestAppContext) {
6000 init_test(cx);
6001
6002 let fs = FakeFs::new(cx.executor());
6003 fs.insert_tree(
6004 "/root",
6005 json!({
6006 "normal.txt": "normal content",
6007 "readonly": {
6008 "config.txt": "readonly content"
6009 }
6010 }),
6011 )
6012 .await;
6013 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
6014
6015 cx.update(|cx| {
6016 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6017 settings.tool_permissions.tools.insert(
6018 SaveFileTool::NAME.into(),
6019 agent_settings::ToolRules {
6020 default: Some(settings::ToolPermissionMode::Allow),
6021 always_allow: vec![],
6022 always_deny: vec![agent_settings::CompiledRegex::new(r"readonly", false).unwrap()],
6023 always_confirm: vec![],
6024 invalid_patterns: vec![],
6025 },
6026 );
6027 agent_settings::AgentSettings::override_global(settings, cx);
6028 });
6029
6030 #[allow(clippy::arc_with_non_send_sync)]
6031 let tool = Arc::new(crate::SaveFileTool::new(project));
6032 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6033
6034 let task = cx.update(|cx| {
6035 tool.run(
6036 ToolInput::resolved(crate::SaveFileToolInput {
6037 paths: vec![
6038 std::path::PathBuf::from("root/normal.txt"),
6039 std::path::PathBuf::from("root/readonly/config.txt"),
6040 ],
6041 }),
6042 event_stream,
6043 cx,
6044 )
6045 });
6046
6047 let result = task.await;
6048 assert!(
6049 result.is_err(),
6050 "expected save to be blocked due to denied path"
6051 );
6052 assert!(
6053 result.unwrap_err().contains("blocked"),
6054 "error should mention the save was blocked"
6055 );
6056}
6057
6058#[gpui::test]
6059async fn test_save_file_tool_respects_deny_rules(cx: &mut TestAppContext) {
6060 init_test(cx);
6061
6062 let fs = FakeFs::new(cx.executor());
6063 fs.insert_tree("/root", json!({"config.secret": "secret config"}))
6064 .await;
6065 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
6066
6067 cx.update(|cx| {
6068 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6069 settings.tool_permissions.tools.insert(
6070 SaveFileTool::NAME.into(),
6071 agent_settings::ToolRules {
6072 default: Some(settings::ToolPermissionMode::Allow),
6073 always_allow: vec![],
6074 always_deny: vec![agent_settings::CompiledRegex::new(r"\.secret$", false).unwrap()],
6075 always_confirm: vec![],
6076 invalid_patterns: vec![],
6077 },
6078 );
6079 agent_settings::AgentSettings::override_global(settings, cx);
6080 });
6081
6082 #[allow(clippy::arc_with_non_send_sync)]
6083 let tool = Arc::new(crate::SaveFileTool::new(project));
6084 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6085
6086 let task = cx.update(|cx| {
6087 tool.run(
6088 ToolInput::resolved(crate::SaveFileToolInput {
6089 paths: vec![std::path::PathBuf::from("root/config.secret")],
6090 }),
6091 event_stream,
6092 cx,
6093 )
6094 });
6095
6096 let result = task.await;
6097 assert!(result.is_err(), "expected save to be blocked");
6098 assert!(
6099 result.unwrap_err().contains("blocked"),
6100 "error should mention the save was blocked"
6101 );
6102}
6103
6104#[gpui::test]
6105async fn test_web_search_tool_deny_rule_blocks_search(cx: &mut TestAppContext) {
6106 init_test(cx);
6107
6108 cx.update(|cx| {
6109 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6110 settings.tool_permissions.tools.insert(
6111 WebSearchTool::NAME.into(),
6112 agent_settings::ToolRules {
6113 default: Some(settings::ToolPermissionMode::Allow),
6114 always_allow: vec![],
6115 always_deny: vec![
6116 agent_settings::CompiledRegex::new(r"internal\.company", false).unwrap(),
6117 ],
6118 always_confirm: vec![],
6119 invalid_patterns: vec![],
6120 },
6121 );
6122 agent_settings::AgentSettings::override_global(settings, cx);
6123 });
6124
6125 #[allow(clippy::arc_with_non_send_sync)]
6126 let tool = Arc::new(crate::WebSearchTool);
6127 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6128
6129 let input: crate::WebSearchToolInput =
6130 serde_json::from_value(json!({"query": "internal.company.com secrets"})).unwrap();
6131
6132 let task = cx.update(|cx| tool.run(ToolInput::resolved(input), event_stream, cx));
6133
6134 let result = task.await;
6135 assert!(result.is_err(), "expected search to be blocked");
6136 match result.unwrap_err() {
6137 crate::WebSearchToolOutput::Error { error } => {
6138 assert!(
6139 error.contains("blocked"),
6140 "error should mention the search was blocked"
6141 );
6142 }
6143 other => panic!("expected Error variant, got: {other:?}"),
6144 }
6145}
6146
6147#[gpui::test]
6148async fn test_edit_file_tool_allow_rule_skips_confirmation(cx: &mut TestAppContext) {
6149 init_test(cx);
6150
6151 let fs = FakeFs::new(cx.executor());
6152 fs.insert_tree("/root", json!({"README.md": "# Hello"}))
6153 .await;
6154 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
6155
6156 cx.update(|cx| {
6157 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6158 settings.tool_permissions.tools.insert(
6159 EditFileTool::NAME.into(),
6160 agent_settings::ToolRules {
6161 default: Some(settings::ToolPermissionMode::Confirm),
6162 always_allow: vec![agent_settings::CompiledRegex::new(r"\.md$", false).unwrap()],
6163 always_deny: vec![],
6164 always_confirm: vec![],
6165 invalid_patterns: vec![],
6166 },
6167 );
6168 agent_settings::AgentSettings::override_global(settings, cx);
6169 });
6170
6171 let context_server_registry =
6172 cx.new(|cx| crate::ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
6173 let language_registry = project.read_with(cx, |project, _cx| project.languages().clone());
6174 let templates = crate::Templates::new();
6175 let thread = cx.new(|cx| {
6176 crate::Thread::new(
6177 project.clone(),
6178 cx.new(|_cx| prompt_store::ProjectContext::default()),
6179 context_server_registry,
6180 templates.clone(),
6181 None,
6182 cx,
6183 )
6184 });
6185
6186 #[allow(clippy::arc_with_non_send_sync)]
6187 let tool = Arc::new(crate::EditFileTool::new(
6188 project,
6189 thread.downgrade(),
6190 language_registry,
6191 templates,
6192 ));
6193 let (event_stream, mut rx) = crate::ToolCallEventStream::test();
6194
6195 let _task = cx.update(|cx| {
6196 tool.run(
6197 ToolInput::resolved(crate::EditFileToolInput {
6198 display_description: "Edit README".to_string(),
6199 path: "root/README.md".into(),
6200 mode: crate::EditFileMode::Edit,
6201 }),
6202 event_stream,
6203 cx,
6204 )
6205 });
6206
6207 cx.run_until_parked();
6208
6209 let event = rx.try_next();
6210 assert!(
6211 !matches!(event, Ok(Some(Ok(ThreadEvent::ToolCallAuthorization(_))))),
6212 "expected no authorization request for allowed .md file"
6213 );
6214}
6215
6216#[gpui::test]
6217async fn test_edit_file_tool_allow_still_prompts_for_local_settings(cx: &mut TestAppContext) {
6218 init_test(cx);
6219
6220 let fs = FakeFs::new(cx.executor());
6221 fs.insert_tree(
6222 "/root",
6223 json!({
6224 ".zed": {
6225 "settings.json": "{}"
6226 },
6227 "README.md": "# Hello"
6228 }),
6229 )
6230 .await;
6231 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
6232
6233 cx.update(|cx| {
6234 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6235 settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
6236 agent_settings::AgentSettings::override_global(settings, cx);
6237 });
6238
6239 let context_server_registry =
6240 cx.new(|cx| crate::ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
6241 let language_registry = project.read_with(cx, |project, _cx| project.languages().clone());
6242 let templates = crate::Templates::new();
6243 let thread = cx.new(|cx| {
6244 crate::Thread::new(
6245 project.clone(),
6246 cx.new(|_cx| prompt_store::ProjectContext::default()),
6247 context_server_registry,
6248 templates.clone(),
6249 None,
6250 cx,
6251 )
6252 });
6253
6254 #[allow(clippy::arc_with_non_send_sync)]
6255 let tool = Arc::new(crate::EditFileTool::new(
6256 project,
6257 thread.downgrade(),
6258 language_registry,
6259 templates,
6260 ));
6261
6262 // Editing a file inside .zed/ should still prompt even with global default: allow,
6263 // because local settings paths are sensitive and require confirmation regardless.
6264 let (event_stream, mut rx) = crate::ToolCallEventStream::test();
6265 let _task = cx.update(|cx| {
6266 tool.run(
6267 ToolInput::resolved(crate::EditFileToolInput {
6268 display_description: "Edit local settings".to_string(),
6269 path: "root/.zed/settings.json".into(),
6270 mode: crate::EditFileMode::Edit,
6271 }),
6272 event_stream,
6273 cx,
6274 )
6275 });
6276
6277 let _update = rx.expect_update_fields().await;
6278 let _auth = rx.expect_authorization().await;
6279}
6280
6281#[gpui::test]
6282async fn test_fetch_tool_deny_rule_blocks_url(cx: &mut TestAppContext) {
6283 init_test(cx);
6284
6285 cx.update(|cx| {
6286 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6287 settings.tool_permissions.tools.insert(
6288 FetchTool::NAME.into(),
6289 agent_settings::ToolRules {
6290 default: Some(settings::ToolPermissionMode::Allow),
6291 always_allow: vec![],
6292 always_deny: vec![
6293 agent_settings::CompiledRegex::new(r"internal\.company\.com", false).unwrap(),
6294 ],
6295 always_confirm: vec![],
6296 invalid_patterns: vec![],
6297 },
6298 );
6299 agent_settings::AgentSettings::override_global(settings, cx);
6300 });
6301
6302 let http_client = gpui::http_client::FakeHttpClient::with_200_response();
6303
6304 #[allow(clippy::arc_with_non_send_sync)]
6305 let tool = Arc::new(crate::FetchTool::new(http_client));
6306 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6307
6308 let input: crate::FetchToolInput =
6309 serde_json::from_value(json!({"url": "https://internal.company.com/api"})).unwrap();
6310
6311 let task = cx.update(|cx| tool.run(ToolInput::resolved(input), event_stream, cx));
6312
6313 let result = task.await;
6314 assert!(result.is_err(), "expected fetch to be blocked");
6315 assert!(
6316 result.unwrap_err().contains("blocked"),
6317 "error should mention the fetch was blocked"
6318 );
6319}
6320
6321#[gpui::test]
6322async fn test_fetch_tool_allow_rule_skips_confirmation(cx: &mut TestAppContext) {
6323 init_test(cx);
6324
6325 cx.update(|cx| {
6326 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6327 settings.tool_permissions.tools.insert(
6328 FetchTool::NAME.into(),
6329 agent_settings::ToolRules {
6330 default: Some(settings::ToolPermissionMode::Confirm),
6331 always_allow: vec![agent_settings::CompiledRegex::new(r"docs\.rs", false).unwrap()],
6332 always_deny: vec![],
6333 always_confirm: vec![],
6334 invalid_patterns: vec![],
6335 },
6336 );
6337 agent_settings::AgentSettings::override_global(settings, cx);
6338 });
6339
6340 let http_client = gpui::http_client::FakeHttpClient::with_200_response();
6341
6342 #[allow(clippy::arc_with_non_send_sync)]
6343 let tool = Arc::new(crate::FetchTool::new(http_client));
6344 let (event_stream, mut rx) = crate::ToolCallEventStream::test();
6345
6346 let input: crate::FetchToolInput =
6347 serde_json::from_value(json!({"url": "https://docs.rs/some-crate"})).unwrap();
6348
6349 let _task = cx.update(|cx| tool.run(ToolInput::resolved(input), event_stream, cx));
6350
6351 cx.run_until_parked();
6352
6353 let event = rx.try_next();
6354 assert!(
6355 !matches!(event, Ok(Some(Ok(ThreadEvent::ToolCallAuthorization(_))))),
6356 "expected no authorization request for allowed docs.rs URL"
6357 );
6358}
6359
// When a message is queued mid-turn, the turn must stop at the next tool-call
// boundary with `EndTurn` instead of sending another completion request, and
// the queued-message flag must remain set so the caller can send it next.
#[gpui::test]
async fn test_queued_message_ends_turn_at_boundary(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Add a tool so we can simulate tool calls
    thread.update(cx, |thread, _cx| {
        thread.add_tool(EchoTool);
    });

    // Start a turn by sending a message
    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Use the echo tool"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Simulate the model making a tool call (complete input, so the tool can run)
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_1".into(),
            name: "echo".into(),
            raw_input: r#"{"text": "hello"}"#.into(),
            input: json!({"text": "hello"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::ToolUse));

    // Signal that a message is queued before ending the stream
    thread.update(cx, |thread, _cx| {
        thread.set_has_queued_message(true);
    });

    // Now end the stream - tool will run, and the boundary check should see the queue
    fake_model.end_last_completion_stream();

    // Collect all events until the turn stops
    let all_events = collect_events_until_stop(&mut events, cx).await;

    // Verify we received the tool call event
    let tool_call_ids: Vec<_> = all_events
        .iter()
        .filter_map(|e| match e {
            Ok(ThreadEvent::ToolCall(tc)) => Some(tc.tool_call_id.to_string()),
            _ => None,
        })
        .collect();
    assert_eq!(
        tool_call_ids,
        vec!["tool_1"],
        "Should have received a tool call event for our echo tool"
    );

    // The turn should have stopped with EndTurn (not looped back to the model)
    let stop_reasons = stop_events(all_events);
    assert_eq!(
        stop_reasons,
        vec![acp::StopReason::EndTurn],
        "Turn should have ended after tool completion due to queued message"
    );

    // Verify the queued message flag is still set
    thread.update(cx, |thread, _cx| {
        assert!(
            thread.has_queued_message(),
            "Should still have queued message flag set"
        );
    });

    // Thread should be idle now
    thread.update(cx, |thread, _cx| {
        assert!(
            thread.is_turn_complete(),
            "Thread should not be running after turn ends"
        );
    });
}
6444
// When a streaming tool fails partway through receiving its input chunks, the
// thread should stop consuming the model stream right away and issue a
// follow-up completion request that carries the failed tool result.
#[gpui::test]
async fn test_streaming_tool_error_breaks_stream_loop_immediately(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // This tool errors after receiving its first input chunk.
    thread.update(cx, |thread, _cx| {
        thread.add_tool(StreamingFailingEchoTool {
            receive_chunks_until_failure: 1,
        });
    });

    let _events = thread
        .update(cx, |thread, cx| {
            thread.send(
                UserMessageId::new(),
                ["Use the streaming_failing_echo tool"],
                cx,
            )
        })
        .unwrap();
    cx.run_until_parked();

    // A partial tool use: input is still streaming (is_input_complete: false),
    // so this delivers the first chunk that makes the tool fail.
    let tool_use = LanguageModelToolUse {
        id: "call_1".into(),
        name: StreamingFailingEchoTool::NAME.into(),
        raw_input: "hello".into(),
        input: json!({}),
        is_input_complete: false,
        thought_signature: None,
    };

    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone()));

    cx.run_until_parked();

    // Without ending the stream, the thread should already have sent a new
    // completion request containing the error tool result.
    let completions = fake_model.pending_completions();
    let last_completion = completions.last().unwrap();

    assert_eq!(
        last_completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Use the streaming_failing_echo tool".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![language_model::MessageContent::ToolUse(tool_use.clone())],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![language_model::MessageContent::ToolResult(
                    LanguageModelToolResult {
                        tool_use_id: tool_use.id.clone(),
                        tool_name: tool_use.name,
                        is_error: true,
                        content: "failed".into(),
                        output: Some("failed".into()),
                    }
                )],
                cache: true,
                reasoning_details: None,
            },
        ]
    );
}
6519
// A streaming tool failure must not cut off tools that are still running: the
// follow-up completion request is only sent after the earlier (successful)
// tool call finishes, and it carries both tool results.
#[gpui::test]
async fn test_streaming_tool_error_waits_for_prior_tools_to_complete(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Gate used to hold the first tool open until we explicitly release it.
    let (complete_streaming_echo_tool_call_tx, complete_streaming_echo_tool_call_rx) =
        oneshot::channel();

    thread.update(cx, |thread, _cx| {
        thread.add_tool(
            StreamingEchoTool::new().with_wait_until_complete(complete_streaming_echo_tool_call_rx),
        );
        // Fails after receiving its first input chunk.
        thread.add_tool(StreamingFailingEchoTool {
            receive_chunks_until_failure: 1,
        });
    });

    let _events = thread
        .update(cx, |thread, cx| {
            thread.send(
                UserMessageId::new(),
                ["Use the streaming_echo tool and the streaming_failing_echo tool"],
                cx,
            )
        })
        .unwrap();
    cx.run_until_parked();

    // First tool call streams in two parts: a partial chunk, then the
    // completed input for the same id.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "call_1".into(),
            name: StreamingEchoTool::NAME.into(),
            raw_input: "hello".into(),
            input: json!({ "text": "hello" }),
            is_input_complete: false,
            thought_signature: None,
        },
    ));
    let first_tool_use = LanguageModelToolUse {
        id: "call_1".into(),
        name: StreamingEchoTool::NAME.into(),
        raw_input: "hello world".into(),
        input: json!({ "text": "hello world" }),
        is_input_complete: true,
        thought_signature: None,
    };
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        first_tool_use.clone(),
    ));
    // Second tool call: a partial chunk for the failing tool, which errors on
    // receipt while the first tool is still held open by the gate.
    let second_tool_use = LanguageModelToolUse {
        name: StreamingFailingEchoTool::NAME.into(),
        raw_input: "hello".into(),
        input: json!({ "text": "hello" }),
        is_input_complete: false,
        thought_signature: None,
        id: "call_2".into(),
    };
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        second_tool_use.clone(),
    ));

    cx.run_until_parked();

    // Release the gate so the first tool can finish; only then should the
    // follow-up completion request be sent.
    complete_streaming_echo_tool_call_tx.send(()).unwrap();

    cx.run_until_parked();

    let completions = fake_model.pending_completions();
    let last_completion = completions.last().unwrap();

    // The follow-up request contains both tool uses and both results: the
    // failure for call_2 and the successful echo for call_1.
    assert_eq!(
        last_completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![
                    "Use the streaming_echo tool and the streaming_failing_echo tool".into()
                ],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![
                    language_model::MessageContent::ToolUse(first_tool_use.clone()),
                    language_model::MessageContent::ToolUse(second_tool_use.clone())
                ],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![
                    language_model::MessageContent::ToolResult(LanguageModelToolResult {
                        tool_use_id: second_tool_use.id.clone(),
                        tool_name: second_tool_use.name,
                        is_error: true,
                        content: "failed".into(),
                        output: Some("failed".into()),
                    }),
                    language_model::MessageContent::ToolResult(LanguageModelToolResult {
                        tool_use_id: first_tool_use.id.clone(),
                        tool_name: first_tool_use.name,
                        is_error: false,
                        content: "hello world".into(),
                        output: Some("hello world".into()),
                    }),
                ],
                cache: true,
                reasoning_details: None,
            },
        ]
    );
}
6637
6638#[gpui::test]
6639async fn test_mid_turn_model_and_settings_refresh(cx: &mut TestAppContext) {
6640 let ThreadTest {
6641 model, thread, fs, ..
6642 } = setup(cx, TestModel::Fake).await;
6643 let fake_model_a = model.as_fake();
6644
6645 thread.update(cx, |thread, _cx| {
6646 thread.add_tool(EchoTool);
6647 thread.add_tool(DelayTool);
6648 });
6649
6650 // Set up two profiles: profile-a has both tools, profile-b has only DelayTool.
6651 fs.insert_file(
6652 paths::settings_file(),
6653 json!({
6654 "agent": {
6655 "profiles": {
6656 "profile-a": {
6657 "name": "Profile A",
6658 "tools": {
6659 EchoTool::NAME: true,
6660 DelayTool::NAME: true,
6661 }
6662 },
6663 "profile-b": {
6664 "name": "Profile B",
6665 "tools": {
6666 DelayTool::NAME: true,
6667 }
6668 }
6669 }
6670 }
6671 })
6672 .to_string()
6673 .into_bytes(),
6674 )
6675 .await;
6676 cx.run_until_parked();
6677
6678 thread.update(cx, |thread, cx| {
6679 thread.set_profile(AgentProfileId("profile-a".into()), cx);
6680 thread.set_thinking_enabled(false, cx);
6681 });
6682
6683 // Send a message — first iteration starts with model A, profile-a, thinking off.
6684 thread
6685 .update(cx, |thread, cx| {
6686 thread.send(UserMessageId::new(), ["test mid-turn refresh"], cx)
6687 })
6688 .unwrap();
6689 cx.run_until_parked();
6690
6691 // Verify first request has both tools and thinking disabled.
6692 let completions = fake_model_a.pending_completions();
6693 assert_eq!(completions.len(), 1);
6694 let first_tools = tool_names_for_completion(&completions[0]);
6695 assert_eq!(first_tools, vec![DelayTool::NAME, EchoTool::NAME]);
6696 assert!(!completions[0].thinking_allowed);
6697
6698 // Model A responds with an echo tool call.
6699 fake_model_a.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
6700 LanguageModelToolUse {
6701 id: "tool_1".into(),
6702 name: "echo".into(),
6703 raw_input: r#"{"text":"hello"}"#.into(),
6704 input: json!({"text": "hello"}),
6705 is_input_complete: true,
6706 thought_signature: None,
6707 },
6708 ));
6709 fake_model_a.end_last_completion_stream();
6710
6711 // Before the next iteration runs, switch to profile-b (only DelayTool),
6712 // swap in a new model, and enable thinking.
6713 let fake_model_b = Arc::new(FakeLanguageModel::with_id_and_thinking(
6714 "test-provider",
6715 "model-b",
6716 "Model B",
6717 true,
6718 ));
6719 thread.update(cx, |thread, cx| {
6720 thread.set_profile(AgentProfileId("profile-b".into()), cx);
6721 thread.set_model(fake_model_b.clone() as Arc<dyn LanguageModel>, cx);
6722 thread.set_thinking_enabled(true, cx);
6723 });
6724
6725 // Run until parked — processes the echo tool call, loops back, picks up
6726 // the new model/profile/thinking, and makes a second request to model B.
6727 cx.run_until_parked();
6728
6729 // The second request should have gone to model B.
6730 let model_b_completions = fake_model_b.pending_completions();
6731 assert_eq!(
6732 model_b_completions.len(),
6733 1,
6734 "second request should go to model B"
6735 );
6736
6737 // Profile-b only has DelayTool, so echo should be gone.
6738 let second_tools = tool_names_for_completion(&model_b_completions[0]);
6739 assert_eq!(second_tools, vec![DelayTool::NAME]);
6740
6741 // Thinking should now be enabled.
6742 assert!(model_b_completions[0].thinking_allowed);
6743}