1use super::*;
2use acp_thread::{
3 AgentConnection, AgentModelGroupName, AgentModelList, PermissionOptions, ThreadStatus,
4 UserMessageId,
5};
6use agent_client_protocol::{self as acp};
7use agent_settings::AgentProfileId;
8use anyhow::Result;
9use client::{Client, RefreshLlmTokenListener, UserStore};
10use collections::IndexMap;
11use context_server::{ContextServer, ContextServerCommand, ContextServerId};
12use feature_flags::FeatureFlagAppExt as _;
13use fs::{FakeFs, Fs};
14use futures::{
15 FutureExt as _, StreamExt,
16 channel::{
17 mpsc::{self, UnboundedReceiver},
18 oneshot,
19 },
20 future::{Fuse, Shared},
21};
22use gpui::{
23 App, AppContext, AsyncApp, Entity, Task, TestAppContext, UpdateGlobal,
24 http_client::FakeHttpClient,
25};
26use indoc::indoc;
27use language_model::{
28 CompletionIntent, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
29 LanguageModelId, LanguageModelProviderName, LanguageModelRegistry, LanguageModelRequest,
30 LanguageModelRequestMessage, LanguageModelToolResult, LanguageModelToolSchemaFormat,
31 LanguageModelToolUse, MessageContent, Role, StopReason, TokenUsage,
32 fake_provider::FakeLanguageModel,
33};
34use pretty_assertions::assert_eq;
35use project::{
36 Project, context_server_store::ContextServerStore, project_settings::ProjectSettings,
37};
38use prompt_store::ProjectContext;
39use reqwest_client::ReqwestClient;
40use schemars::JsonSchema;
41use serde::{Deserialize, Serialize};
42use serde_json::json;
43use settings::{Settings, SettingsStore};
44use std::{
45 path::Path,
46 pin::Pin,
47 rc::Rc,
48 sync::{
49 Arc,
50 atomic::{AtomicBool, AtomicUsize, Ordering},
51 },
52 time::Duration,
53};
54use util::path;
55
56mod edit_file_thread_test;
57mod test_tools;
58use test_tools::*;
59
60pub(crate) fn init_test(cx: &mut TestAppContext) {
61 cx.update(|cx| {
62 let settings_store = SettingsStore::test(cx);
63 cx.set_global(settings_store);
64 });
65}
66
/// In-memory stand-in for a real terminal, used by terminal-tool tests.
pub(crate) struct FakeTerminalHandle {
    // Flipped to true once `kill` is invoked.
    killed: Arc<AtomicBool>,
    // Test-controlled flag returned by `was_stopped_by_user`.
    stopped_by_user: Arc<AtomicBool>,
    // Consumed by `signal_exit` to resolve `wait_for_exit`; `None` after first use.
    exit_sender: std::cell::RefCell<Option<futures::channel::oneshot::Sender<()>>>,
    // Shared future that completes when the fake terminal "exits".
    wait_for_exit: Shared<Task<acp::TerminalExitStatus>>,
    // Canned output returned by `current_output`.
    output: acp::TerminalOutputResponse,
    // Stable id ("fake_terminal") returned by `id`.
    id: acp::TerminalId,
}
75
76impl FakeTerminalHandle {
77 pub(crate) fn new_never_exits(cx: &mut App) -> Self {
78 let killed = Arc::new(AtomicBool::new(false));
79 let stopped_by_user = Arc::new(AtomicBool::new(false));
80
81 let (exit_sender, exit_receiver) = futures::channel::oneshot::channel();
82
83 let wait_for_exit = cx
84 .spawn(async move |_cx| {
85 // Wait for the exit signal (sent when kill() is called)
86 let _ = exit_receiver.await;
87 acp::TerminalExitStatus::new()
88 })
89 .shared();
90
91 Self {
92 killed,
93 stopped_by_user,
94 exit_sender: std::cell::RefCell::new(Some(exit_sender)),
95 wait_for_exit,
96 output: acp::TerminalOutputResponse::new("partial output".to_string(), false),
97 id: acp::TerminalId::new("fake_terminal".to_string()),
98 }
99 }
100
101 pub(crate) fn new_with_immediate_exit(cx: &mut App, exit_code: u32) -> Self {
102 let killed = Arc::new(AtomicBool::new(false));
103 let stopped_by_user = Arc::new(AtomicBool::new(false));
104 let (exit_sender, _exit_receiver) = futures::channel::oneshot::channel();
105
106 let wait_for_exit = cx
107 .spawn(async move |_cx| acp::TerminalExitStatus::new().exit_code(exit_code))
108 .shared();
109
110 Self {
111 killed,
112 stopped_by_user,
113 exit_sender: std::cell::RefCell::new(Some(exit_sender)),
114 wait_for_exit,
115 output: acp::TerminalOutputResponse::new("command output".to_string(), false),
116 id: acp::TerminalId::new("fake_terminal".to_string()),
117 }
118 }
119
120 pub(crate) fn was_killed(&self) -> bool {
121 self.killed.load(Ordering::SeqCst)
122 }
123
124 pub(crate) fn set_stopped_by_user(&self, stopped: bool) {
125 self.stopped_by_user.store(stopped, Ordering::SeqCst);
126 }
127
128 pub(crate) fn signal_exit(&self) {
129 if let Some(sender) = self.exit_sender.borrow_mut().take() {
130 let _ = sender.send(());
131 }
132 }
133}
134
135impl crate::TerminalHandle for FakeTerminalHandle {
136 fn id(&self, _cx: &AsyncApp) -> Result<acp::TerminalId> {
137 Ok(self.id.clone())
138 }
139
140 fn current_output(&self, _cx: &AsyncApp) -> Result<acp::TerminalOutputResponse> {
141 Ok(self.output.clone())
142 }
143
144 fn wait_for_exit(&self, _cx: &AsyncApp) -> Result<Shared<Task<acp::TerminalExitStatus>>> {
145 Ok(self.wait_for_exit.clone())
146 }
147
148 fn kill(&self, _cx: &AsyncApp) -> Result<()> {
149 self.killed.store(true, Ordering::SeqCst);
150 self.signal_exit();
151 Ok(())
152 }
153
154 fn was_stopped_by_user(&self, _cx: &AsyncApp) -> Result<bool> {
155 Ok(self.stopped_by_user.load(Ordering::SeqCst))
156 }
157}
158
/// Fake subagent whose `send` resolves with the canned result of `send_task`.
struct FakeSubagentHandle {
    // Id reported by `SubagentHandle::id`.
    session_id: acp::SessionId,
    // Shared task producing the reply returned from `send`.
    send_task: Shared<Task<String>>,
}
163
164impl SubagentHandle for FakeSubagentHandle {
165 fn id(&self) -> acp::SessionId {
166 self.session_id.clone()
167 }
168
169 fn num_entries(&self, _cx: &App) -> usize {
170 unimplemented!()
171 }
172
173 fn send(&self, _message: String, cx: &AsyncApp) -> Task<Result<String>> {
174 let task = self.send_task.clone();
175 cx.background_spawn(async move { Ok(task.await) })
176 }
177}
178
/// Test `ThreadEnvironment` that hands out pre-configured fake handles.
#[derive(Default)]
pub(crate) struct FakeThreadEnvironment {
    // Handle returned by every `create_terminal` call; `create_terminal` panics if unset.
    terminal_handle: Option<Rc<FakeTerminalHandle>>,
    // Handle returned by `create_subagent`; `create_subagent` panics if unset.
    subagent_handle: Option<Rc<FakeSubagentHandle>>,
    // Number of `create_terminal` calls made so far.
    terminal_creations: Arc<AtomicUsize>,
}
185
186impl FakeThreadEnvironment {
187 pub(crate) fn with_terminal(self, terminal_handle: FakeTerminalHandle) -> Self {
188 Self {
189 terminal_handle: Some(terminal_handle.into()),
190 ..self
191 }
192 }
193
194 pub(crate) fn terminal_creation_count(&self) -> usize {
195 self.terminal_creations.load(Ordering::SeqCst)
196 }
197}
198
199impl crate::ThreadEnvironment for FakeThreadEnvironment {
200 fn create_terminal(
201 &self,
202 _command: String,
203 _cwd: Option<std::path::PathBuf>,
204 _output_byte_limit: Option<u64>,
205 _cx: &mut AsyncApp,
206 ) -> Task<Result<Rc<dyn crate::TerminalHandle>>> {
207 self.terminal_creations.fetch_add(1, Ordering::SeqCst);
208 let handle = self
209 .terminal_handle
210 .clone()
211 .expect("Terminal handle not available on FakeThreadEnvironment");
212 Task::ready(Ok(handle as Rc<dyn crate::TerminalHandle>))
213 }
214
215 fn create_subagent(&self, _label: String, _cx: &mut App) -> Result<Rc<dyn SubagentHandle>> {
216 Ok(self
217 .subagent_handle
218 .clone()
219 .expect("Subagent handle not available on FakeThreadEnvironment")
220 as Rc<dyn SubagentHandle>)
221 }
222}
223
/// Environment that creates multiple independent terminal handles for testing concurrent terminals.
struct MultiTerminalEnvironment {
    // Every handle produced by `create_terminal`, in creation order.
    handles: std::cell::RefCell<Vec<Rc<FakeTerminalHandle>>>,
}
228
229impl MultiTerminalEnvironment {
230 fn new() -> Self {
231 Self {
232 handles: std::cell::RefCell::new(Vec::new()),
233 }
234 }
235
236 fn handles(&self) -> Vec<Rc<FakeTerminalHandle>> {
237 self.handles.borrow().clone()
238 }
239}
240
impl crate::ThreadEnvironment for MultiTerminalEnvironment {
    // Creates a fresh never-exiting fake terminal per call and records it so
    // the test can inspect every handle created during the run.
    fn create_terminal(
        &self,
        _command: String,
        _cwd: Option<std::path::PathBuf>,
        _output_byte_limit: Option<u64>,
        cx: &mut AsyncApp,
    ) -> Task<Result<Rc<dyn crate::TerminalHandle>>> {
        // NOTE(review): this assumes `AsyncApp::update` returns the closure's
        // value directly (not a `Result`) — confirm against the gpui version in use.
        let handle = Rc::new(cx.update(|cx| FakeTerminalHandle::new_never_exits(cx)));
        self.handles.borrow_mut().push(handle.clone());
        Task::ready(Ok(handle as Rc<dyn crate::TerminalHandle>))
    }

    fn create_subagent(&self, _label: String, _cx: &mut App) -> Result<Rc<dyn SubagentHandle>> {
        // Subagents are not needed by the multi-terminal tests.
        unimplemented!()
    }
}
258
259fn always_allow_tools(cx: &mut TestAppContext) {
260 cx.update(|cx| {
261 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
262 settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
263 agent_settings::AgentSettings::override_global(settings, cx);
264 });
265}
266
267#[gpui::test]
268async fn test_echo(cx: &mut TestAppContext) {
269 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
270 let fake_model = model.as_fake();
271
272 let events = thread
273 .update(cx, |thread, cx| {
274 thread.send(UserMessageId::new(), ["Testing: Reply with 'Hello'"], cx)
275 })
276 .unwrap();
277 cx.run_until_parked();
278 fake_model.send_last_completion_stream_text_chunk("Hello");
279 fake_model
280 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
281 fake_model.end_last_completion_stream();
282
283 let events = events.collect().await;
284 thread.update(cx, |thread, _cx| {
285 assert_eq!(
286 thread.last_received_or_pending_message().unwrap().role(),
287 Role::Assistant
288 );
289 assert_eq!(
290 thread
291 .last_received_or_pending_message()
292 .unwrap()
293 .to_markdown(),
294 "Hello\n"
295 )
296 });
297 assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
298}
299
#[gpui::test]
async fn test_terminal_tool_timeout_kills_handle(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let fs = FakeFs::new(cx.executor());
    let project = Project::test(fs, [], cx).await;

    // Environment with a terminal that never exits on its own, so only the
    // tool's timeout path can terminate it.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::TerminalTool::new(project, environment));
    let (event_stream, mut rx) = crate::ToolCallEventStream::test();

    // Run a long-lived command with a very short (5ms) timeout.
    let task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::TerminalToolInput {
                command: "sleep 1000".to_string(),
                cd: ".".to_string(),
                timeout_ms: Some(5),
            }),
            event_stream,
            cx,
        )
    });

    // The tool should first surface the terminal in a tool-call update.
    let update = rx.expect_update_fields().await;
    assert!(
        update.content.iter().any(|blocks| {
            blocks
                .iter()
                .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
        }),
        "expected tool call update to include terminal content"
    );

    let mut task_future: Pin<Box<Fuse<Task<Result<String, String>>>>> = Box::pin(task.fuse());

    // Manually pump the executor until the task resolves, or fail after 500ms
    // of wall-clock time. `now_or_never` polls without blocking.
    let deadline = std::time::Instant::now() + Duration::from_millis(500);
    loop {
        if let Some(result) = task_future.as_mut().now_or_never() {
            let result = result.expect("terminal tool task should complete");

            // Timeout must have killed the terminal and preserved its output.
            assert!(
                handle.was_killed(),
                "expected terminal handle to be killed on timeout"
            );
            assert!(
                result.contains("partial output"),
                "expected result to include terminal output, got: {result}"
            );
            return;
        }

        if std::time::Instant::now() >= deadline {
            panic!("timed out waiting for terminal tool task to complete");
        }

        cx.run_until_parked();
        cx.background_executor.timer(Duration::from_millis(1)).await;
    }
}
365
#[gpui::test]
#[ignore]
async fn test_terminal_tool_without_timeout_does_not_kill_handle(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let fs = FakeFs::new(cx.executor());
    let project = Project::test(fs, [], cx).await;

    // A terminal that never exits; without a timeout the tool should simply
    // keep waiting rather than killing it.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::TerminalTool::new(project, environment));
    let (event_stream, mut rx) = crate::ToolCallEventStream::test();

    // Keep the task alive for the duration of the test; dropping it could
    // cancel the tool run.
    let _task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::TerminalToolInput {
                command: "sleep 1000".to_string(),
                cd: ".".to_string(),
                timeout_ms: None,
            }),
            event_stream,
            cx,
        )
    });

    let update = rx.expect_update_fields().await;
    assert!(
        update.content.iter().any(|blocks| {
            blocks
                .iter()
                .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
        }),
        "expected tool call update to include terminal content"
    );

    // Give the tool ample time to (incorrectly) kill the terminal.
    cx.background_executor
        .timer(Duration::from_millis(25))
        .await;

    assert!(
        !handle.was_killed(),
        "did not expect terminal handle to be killed without a timeout"
    );
}
415
416#[gpui::test]
417async fn test_thinking(cx: &mut TestAppContext) {
418 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
419 let fake_model = model.as_fake();
420
421 let events = thread
422 .update(cx, |thread, cx| {
423 thread.send(
424 UserMessageId::new(),
425 [indoc! {"
426 Testing:
427
428 Generate a thinking step where you just think the word 'Think',
429 and have your final answer be 'Hello'
430 "}],
431 cx,
432 )
433 })
434 .unwrap();
435 cx.run_until_parked();
436 fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Thinking {
437 text: "Think".to_string(),
438 signature: None,
439 });
440 fake_model.send_last_completion_stream_text_chunk("Hello");
441 fake_model
442 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
443 fake_model.end_last_completion_stream();
444
445 let events = events.collect().await;
446 thread.update(cx, |thread, _cx| {
447 assert_eq!(
448 thread.last_received_or_pending_message().unwrap().role(),
449 Role::Assistant
450 );
451 assert_eq!(
452 thread
453 .last_received_or_pending_message()
454 .unwrap()
455 .to_markdown(),
456 indoc! {"
457 <think>Think</think>
458 Hello
459 "}
460 )
461 });
462 assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
463}
464
465#[gpui::test]
466async fn test_system_prompt(cx: &mut TestAppContext) {
467 let ThreadTest {
468 model,
469 thread,
470 project_context,
471 ..
472 } = setup(cx, TestModel::Fake).await;
473 let fake_model = model.as_fake();
474
475 project_context.update(cx, |project_context, _cx| {
476 project_context.shell = "test-shell".into()
477 });
478 thread.update(cx, |thread, _| thread.add_tool(EchoTool));
479 thread
480 .update(cx, |thread, cx| {
481 thread.send(UserMessageId::new(), ["abc"], cx)
482 })
483 .unwrap();
484 cx.run_until_parked();
485 let mut pending_completions = fake_model.pending_completions();
486 assert_eq!(
487 pending_completions.len(),
488 1,
489 "unexpected pending completions: {:?}",
490 pending_completions
491 );
492
493 let pending_completion = pending_completions.pop().unwrap();
494 assert_eq!(pending_completion.messages[0].role, Role::System);
495
496 let system_message = &pending_completion.messages[0];
497 let system_prompt = system_message.content[0].to_str().unwrap();
498 assert!(
499 system_prompt.contains("test-shell"),
500 "unexpected system message: {:?}",
501 system_message
502 );
503 assert!(
504 system_prompt.contains("## Fixing Diagnostics"),
505 "unexpected system message: {:?}",
506 system_message
507 );
508}
509
510#[gpui::test]
511async fn test_system_prompt_without_tools(cx: &mut TestAppContext) {
512 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
513 let fake_model = model.as_fake();
514
515 thread
516 .update(cx, |thread, cx| {
517 thread.send(UserMessageId::new(), ["abc"], cx)
518 })
519 .unwrap();
520 cx.run_until_parked();
521 let mut pending_completions = fake_model.pending_completions();
522 assert_eq!(
523 pending_completions.len(),
524 1,
525 "unexpected pending completions: {:?}",
526 pending_completions
527 );
528
529 let pending_completion = pending_completions.pop().unwrap();
530 assert_eq!(pending_completion.messages[0].role, Role::System);
531
532 let system_message = &pending_completion.messages[0];
533 let system_prompt = system_message.content[0].to_str().unwrap();
534 assert!(
535 !system_prompt.contains("## Tool Use"),
536 "unexpected system message: {:?}",
537 system_message
538 );
539 assert!(
540 !system_prompt.contains("## Fixing Diagnostics"),
541 "unexpected system message: {:?}",
542 system_message
543 );
544}
545
// Verifies that exactly one message — the most recent user message or tool
// result — carries `cache: true` in each completion request.
#[gpui::test]
async fn test_prompt_caching(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Send initial user message and verify it's cached
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Message 1"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // messages[0] is the system prompt; compare from index 1 onward.
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        completion.messages[1..],
        vec![LanguageModelRequestMessage {
            role: Role::User,
            content: vec!["Message 1".into()],
            cache: true,
            reasoning_details: None,
        }]
    );
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Text(
        "Response to Message 1".into(),
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Send another user message and verify only the latest is cached
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Message 2"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec!["Response to Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 2".into()],
                cache: true,
                reasoning_details: None,
            }
        ]
    );
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Text(
        "Response to Message 2".into(),
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Simulate a tool call and verify that the latest tool result is cached
    thread.update(cx, |thread, _| thread.add_tool(EchoTool));
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Use the echo tool"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    let tool_use = LanguageModelToolUse {
        id: "tool_1".into(),
        name: EchoTool::NAME.into(),
        raw_input: json!({"text": "test"}).to_string(),
        input: json!({"text": "test"}),
        is_input_complete: true,
        thought_signature: None,
    };
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone()));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // After the tool runs, the follow-up request should cache only the
    // trailing tool-result message.
    let completion = fake_model.pending_completions().pop().unwrap();
    let tool_result = LanguageModelToolResult {
        tool_use_id: "tool_1".into(),
        tool_name: EchoTool::NAME.into(),
        is_error: false,
        content: "test".into(),
        output: Some("test".into()),
    };
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec!["Response to Message 1".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Message 2".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec!["Response to Message 2".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Use the echo tool".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![MessageContent::ToolUse(tool_use)],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::ToolResult(tool_result)],
                cache: true,
                reasoning_details: None,
            }
        ]
    );
}
691
// End-to-end test against a real model; only runs with the "e2e" feature.
#[gpui::test]
#[cfg_attr(not(feature = "e2e"), ignore)]
async fn test_basic_tool_calls(cx: &mut TestAppContext) {
    let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;

    // Test a tool call that's likely to complete *before* streaming stops.
    let events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(EchoTool);
            thread.send(
                UserMessageId::new(),
                ["Now test the echo tool with 'Hello'. Does it work? Say 'Yes' or 'No'."],
                cx,
            )
        })
        .unwrap()
        .collect()
        .await;
    assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);

    // Test a tool calls that's likely to complete *after* streaming stops.
    let events = thread
        .update(cx, |thread, cx| {
            thread.remove_tool(&EchoTool::NAME);
            thread.add_tool(DelayTool);
            thread.send(
                UserMessageId::new(),
                [
                    "Now call the delay tool with 200ms.",
                    "When the timer goes off, then you echo the output of the tool.",
                ],
                cx,
            )
        })
        .unwrap()
        .collect()
        .await;
    assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
    // The delay tool's output ("Ding") should appear in the assistant's final
    // message, proving the model saw the late-arriving tool result.
    thread.update(cx, |thread, _cx| {
        assert!(
            thread
                .last_received_or_pending_message()
                .unwrap()
                .as_agent_message()
                .unwrap()
                .content
                .iter()
                .any(|content| {
                    if let AgentMessageContent::Text(text) = content {
                        text.contains("Ding")
                    } else {
                        false
                    }
                }),
            "{}",
            thread.to_markdown()
        );
    });
}
751
// End-to-end test: verifies tool-call input streams incrementally (a partial
// input is observable before the input is complete). Only runs with "e2e".
#[gpui::test]
#[cfg_attr(not(feature = "e2e"), ignore)]
async fn test_streaming_tool_calls(cx: &mut TestAppContext) {
    let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;

    // Test a tool call that's likely to complete *before* streaming stops.
    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(WordListTool);
            thread.send(UserMessageId::new(), ["Test the word_list tool."], cx)
        })
        .unwrap();

    let mut saw_partial_tool_use = false;
    while let Some(event) = events.next().await {
        if let Ok(ThreadEvent::ToolCall(tool_call)) = event {
            thread.update(cx, |thread, _cx| {
                // Look for a tool use in the thread's last message
                let message = thread.last_received_or_pending_message().unwrap();
                let agent_message = message.as_agent_message().unwrap();
                let last_content = agent_message.content.last().unwrap();
                if let AgentMessageContent::ToolUse(last_tool_use) = last_content {
                    assert_eq!(last_tool_use.name.as_ref(), "word_list");
                    if tool_call.status == acp::ToolCallStatus::Pending {
                        // While pending, the input may still be mid-stream:
                        // incomplete and missing its later keys (e.g. "g").
                        if !last_tool_use.is_input_complete
                            && last_tool_use.input.get("g").is_none()
                        {
                            saw_partial_tool_use = true;
                        }
                    } else {
                        // Once past Pending, the full input must have arrived.
                        last_tool_use
                            .input
                            .get("a")
                            .expect("'a' has streamed because input is now complete");
                        last_tool_use
                            .input
                            .get("g")
                            .expect("'g' has streamed because input is now complete");
                    }
                } else {
                    panic!("last content should be a tool use");
                }
            });
        }
    }

    assert!(
        saw_partial_tool_use,
        "should see at least one partially streamed tool use in the history"
    );
}
803
// Exercises the permission flow: allow-once, deny, allow-always (which should
// suppress future prompts for the same tool).
#[gpui::test]
async fn test_tool_authorization(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(ToolRequiringPermission);
            thread.send(UserMessageId::new(), ["abc"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    // Two tool uses in one turn, so two authorization prompts are expected.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_1".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_2".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    let tool_call_auth_1 = next_tool_call_authorization(&mut events).await;
    let tool_call_auth_2 = next_tool_call_authorization(&mut events).await;

    // Approve the first - send "allow" option_id (UI transforms "once" to "allow")
    tool_call_auth_1
        .response
        .send(acp_thread::SelectedPermissionOutcome::new(
            acp::PermissionOptionId::new("allow"),
            acp::PermissionOptionKind::AllowOnce,
        ))
        .unwrap();
    cx.run_until_parked();

    // Reject the second - send "deny" option_id directly since Deny is now a button
    tool_call_auth_2
        .response
        .send(acp_thread::SelectedPermissionOutcome::new(
            acp::PermissionOptionId::new("deny"),
            acp::PermissionOptionKind::RejectOnce,
        ))
        .unwrap();
    cx.run_until_parked();

    // The next request should contain one successful and one error tool result.
    let completion = fake_model.pending_completions().pop().unwrap();
    let message = completion.messages.last().unwrap();
    assert_eq!(
        message.content,
        vec![
            language_model::MessageContent::ToolResult(LanguageModelToolResult {
                tool_use_id: tool_call_auth_1.tool_call.tool_call_id.0.to_string().into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: false,
                content: "Allowed".into(),
                output: Some("Allowed".into())
            }),
            language_model::MessageContent::ToolResult(LanguageModelToolResult {
                tool_use_id: tool_call_auth_2.tool_call.tool_call_id.0.to_string().into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: true,
                content: "Permission to run tool denied by user".into(),
                output: Some("Permission to run tool denied by user".into())
            })
        ]
    );

    // Simulate yet another tool call.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_3".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Respond by always allowing tools - send transformed option_id
    // (UI transforms "always:tool_requiring_permission" to "always_allow:tool_requiring_permission")
    let tool_call_auth_3 = next_tool_call_authorization(&mut events).await;
    tool_call_auth_3
        .response
        .send(acp_thread::SelectedPermissionOutcome::new(
            acp::PermissionOptionId::new("always_allow:tool_requiring_permission"),
            acp::PermissionOptionKind::AllowAlways,
        ))
        .unwrap();
    cx.run_until_parked();
    let completion = fake_model.pending_completions().pop().unwrap();
    let message = completion.messages.last().unwrap();
    assert_eq!(
        message.content,
        vec![language_model::MessageContent::ToolResult(
            LanguageModelToolResult {
                tool_use_id: tool_call_auth_3.tool_call.tool_call_id.0.to_string().into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: false,
                content: "Allowed".into(),
                output: Some("Allowed".into())
            }
        )]
    );

    // Simulate a final tool call, ensuring we don't trigger authorization.
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_id_4".into(),
            name: ToolRequiringPermission::NAME.into(),
            raw_input: "{}".into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    let completion = fake_model.pending_completions().pop().unwrap();
    let message = completion.messages.last().unwrap();
    assert_eq!(
        message.content,
        vec![language_model::MessageContent::ToolResult(
            LanguageModelToolResult {
                tool_use_id: "tool_id_4".into(),
                tool_name: ToolRequiringPermission::NAME.into(),
                is_error: false,
                content: "Allowed".into(),
                output: Some("Allowed".into())
            }
        )]
    );
}
949
950#[gpui::test]
951async fn test_tool_hallucination(cx: &mut TestAppContext) {
952 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
953 let fake_model = model.as_fake();
954
955 let mut events = thread
956 .update(cx, |thread, cx| {
957 thread.send(UserMessageId::new(), ["abc"], cx)
958 })
959 .unwrap();
960 cx.run_until_parked();
961 fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
962 LanguageModelToolUse {
963 id: "tool_id_1".into(),
964 name: "nonexistent_tool".into(),
965 raw_input: "{}".into(),
966 input: json!({}),
967 is_input_complete: true,
968 thought_signature: None,
969 },
970 ));
971 fake_model.end_last_completion_stream();
972
973 let tool_call = expect_tool_call(&mut events).await;
974 assert_eq!(tool_call.title, "nonexistent_tool");
975 assert_eq!(tool_call.status, acp::ToolCallStatus::Pending);
976 let update = expect_tool_call_update_fields(&mut events).await;
977 assert_eq!(update.fields.status, Some(acp::ToolCallStatus::Failed));
978}
979
980async fn expect_tool_call(events: &mut UnboundedReceiver<Result<ThreadEvent>>) -> acp::ToolCall {
981 let event = events
982 .next()
983 .await
984 .expect("no tool call authorization event received")
985 .unwrap();
986 match event {
987 ThreadEvent::ToolCall(tool_call) => tool_call,
988 event => {
989 panic!("Unexpected event {event:?}");
990 }
991 }
992}
993
994async fn expect_tool_call_update_fields(
995 events: &mut UnboundedReceiver<Result<ThreadEvent>>,
996) -> acp::ToolCallUpdate {
997 let event = events
998 .next()
999 .await
1000 .expect("no tool call authorization event received")
1001 .unwrap();
1002 match event {
1003 ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(update)) => update,
1004 event => {
1005 panic!("Unexpected event {event:?}");
1006 }
1007 }
1008}
1009
1010async fn expect_plan(events: &mut UnboundedReceiver<Result<ThreadEvent>>) -> acp::Plan {
1011 let event = events
1012 .next()
1013 .await
1014 .expect("no plan event received")
1015 .unwrap();
1016 match event {
1017 ThreadEvent::Plan(plan) => plan,
1018 event => {
1019 panic!("Unexpected event {event:?}");
1020 }
1021 }
1022}
1023
1024async fn next_tool_call_authorization(
1025 events: &mut UnboundedReceiver<Result<ThreadEvent>>,
1026) -> ToolCallAuthorization {
1027 loop {
1028 let event = events
1029 .next()
1030 .await
1031 .expect("no tool call authorization event received")
1032 .unwrap();
1033 if let ThreadEvent::ToolCallAuthorization(tool_call_authorization) = event {
1034 let permission_kinds = tool_call_authorization
1035 .options
1036 .first_option_of_kind(acp::PermissionOptionKind::AllowAlways)
1037 .map(|option| option.kind);
1038 let allow_once = tool_call_authorization
1039 .options
1040 .first_option_of_kind(acp::PermissionOptionKind::AllowOnce)
1041 .map(|option| option.kind);
1042
1043 assert_eq!(
1044 permission_kinds,
1045 Some(acp::PermissionOptionKind::AllowAlways)
1046 );
1047 assert_eq!(allow_once, Some(acp::PermissionOptionKind::AllowOnce));
1048 return tool_call_authorization;
1049 }
1050 }
1051}
1052
#[test]
fn test_permission_options_terminal_with_pattern() {
    // A multi-word cargo invocation should yield a command-pattern option.
    let options = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["cargo build --release".to_string()],
    )
    .build_permission_options();

    let choices = match options {
        PermissionOptions::Dropdown(choices) => choices,
        _ => panic!("Expected dropdown permission options"),
    };

    assert_eq!(choices.len(), 3);
    let labels = choices
        .iter()
        .map(|choice| choice.allow.name.as_ref())
        .collect::<Vec<&str>>();
    assert!(labels.contains(&"Always for terminal"));
    assert!(labels.contains(&"Always for `cargo build` commands"));
    assert!(labels.contains(&"Only this time"));
}
1074
#[test]
fn test_permission_options_terminal_command_with_flag_second_token() {
    // When the second token is a flag ("-la"), only the command name ("ls")
    // appears in the pattern-scoped choice label.
    let permission_options =
        ToolPermissionContext::new(TerminalTool::NAME, vec!["ls -la".to_string()])
            .build_permission_options();

    let PermissionOptions::Dropdown(choices) = permission_options else {
        panic!("Expected dropdown permission options");
    };

    assert_eq!(choices.len(), 3);
    let has_label = |label: &str| {
        choices
            .iter()
            .any(|choice| choice.allow.name.as_ref() == label)
    };
    assert!(has_label("Always for terminal"));
    assert!(has_label("Always for `ls` commands"));
    assert!(has_label("Only this time"));
}
1094
#[test]
fn test_permission_options_terminal_single_word_command() {
    // A bare single-word command still gets a command-scoped choice.
    let permission_options =
        ToolPermissionContext::new(TerminalTool::NAME, vec!["whoami".to_string()])
            .build_permission_options();

    let PermissionOptions::Dropdown(choices) = permission_options else {
        panic!("Expected dropdown permission options");
    };

    assert_eq!(choices.len(), 3);
    let has_label = |label: &str| {
        choices
            .iter()
            .any(|choice| choice.allow.name.as_ref() == label)
    };
    assert!(has_label("Always for terminal"));
    assert!(has_label("Always for `whoami` commands"));
    assert!(has_label("Only this time"));
}
1114
#[test]
fn test_permission_options_edit_file_with_path_pattern() {
    // Editing a file inside a subdirectory offers a directory-scoped
    // "always" choice alongside the tool-wide one.
    let permission_options =
        ToolPermissionContext::new(EditFileTool::NAME, vec!["src/main.rs".to_string()])
            .build_permission_options();

    let PermissionOptions::Dropdown(choices) = permission_options else {
        panic!("Expected dropdown permission options");
    };

    let has_label = |label: &str| {
        choices
            .iter()
            .any(|choice| choice.allow.name.as_ref() == label)
    };
    assert!(has_label("Always for edit file"));
    assert!(has_label("Always for `src/`"));
}
1132
#[test]
fn test_permission_options_fetch_with_domain_pattern() {
    // Fetching a URL offers a domain-scoped "always" choice alongside the
    // tool-wide one.
    let permission_options =
        ToolPermissionContext::new(FetchTool::NAME, vec!["https://docs.rs/gpui".to_string()])
            .build_permission_options();

    let PermissionOptions::Dropdown(choices) = permission_options else {
        panic!("Expected dropdown permission options");
    };

    let has_label = |label: &str| {
        choices
            .iter()
            .any(|choice| choice.allow.name.as_ref() == label)
    };
    assert!(has_label("Always for fetch"));
    assert!(has_label("Always for `docs.rs`"));
}
1150
#[test]
fn test_permission_options_without_pattern() {
    // A local script invocation produces no command-pattern choice: only the
    // tool-wide and one-time options remain.
    let permission_options = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["./deploy.sh --production".to_string()],
    )
    .build_permission_options();

    let PermissionOptions::Dropdown(choices) = permission_options else {
        panic!("Expected dropdown permission options");
    };

    assert_eq!(choices.len(), 2);
    let allow_names: Vec<&str> = choices
        .iter()
        .map(|choice| choice.allow.name.as_ref())
        .collect();
    assert!(allow_names.contains(&"Always for terminal"));
    assert!(allow_names.contains(&"Only this time"));
    // No per-command pattern entry should have been generated.
    assert!(allow_names.iter().all(|label| !label.contains("commands")));
}
1172
#[test]
fn test_permission_options_symlink_target_are_flat_once_only() {
    // Authorizing a symlink target must not offer "always" choices: only a
    // flat one-time allow/deny pair.
    let permission_options =
        ToolPermissionContext::symlink_target(EditFileTool::NAME, vec!["/outside/file.txt".into()])
            .build_permission_options();

    let PermissionOptions::Flat(options) = permission_options else {
        panic!("Expected flat permission options for symlink target authorization");
    };

    assert_eq!(options.len(), 2);
    let has_option = |id: &str, kind: acp::PermissionOptionKind| {
        options
            .iter()
            .any(|option| option.option_id.0.as_ref() == id && option.kind == kind)
    };
    assert!(has_option("allow", acp::PermissionOptionKind::AllowOnce));
    assert!(has_option("deny", acp::PermissionOptionKind::RejectOnce));
}
1193
#[test]
fn test_permission_option_ids_for_terminal() {
    // Verifies the stable option-id scheme: both "always" choices carry the
    // tool-level ids, while the one-time choice uses plain allow/deny ids.
    let permission_options = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["cargo build --release".to_string()],
    )
    .build_permission_options();

    let PermissionOptions::Dropdown(choices) = permission_options else {
        panic!("Expected dropdown permission options");
    };

    // Expect 3 choices: always-tool, always-pattern, once.
    assert_eq!(choices.len(), 3);

    // The first two choices both use the tool-level option IDs.
    for choice in &choices[..2] {
        assert_eq!(choice.allow.option_id.0.as_ref(), "always_allow:terminal");
        assert_eq!(choice.deny.option_id.0.as_ref(), "always_deny:terminal");
    }
    // Only the pattern-scoped choice carries a sub-pattern.
    assert!(choices[0].sub_patterns.is_empty());
    assert_eq!(choices[1].sub_patterns, vec!["^cargo\\s+build(\\s|$)"]);

    // The third choice is the one-time allow/deny pair.
    assert_eq!(choices[2].allow.option_id.0.as_ref(), "allow");
    assert_eq!(choices[2].deny.option_id.0.as_ref(), "deny");
    assert!(choices[2].sub_patterns.is_empty());
}
1229
#[test]
fn test_permission_options_terminal_pipeline_produces_dropdown_with_patterns() {
    // A piped command can't be covered by one command pattern, so the builder
    // falls back to DropdownWithPatterns with one pattern per pipeline stage.
    let permission_options = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["cargo test 2>&1 | tail".to_string()],
    )
    .build_permission_options();

    let PermissionOptions::DropdownWithPatterns {
        choices,
        patterns,
        tool_name,
    } = permission_options
    else {
        panic!("Expected DropdownWithPatterns permission options for pipeline command");
    };

    assert_eq!(tool_name, TerminalTool::NAME);

    // The dropdown itself offers only "Always for terminal" and "Only this time".
    assert_eq!(choices.len(), 2);
    let has_label = |label: &str| {
        choices
            .iter()
            .any(|choice| choice.allow.name.as_ref() == label)
    };
    assert!(has_label("Always for terminal"));
    assert!(has_label("Only this time"));

    // One per-command pattern per pipeline stage: "cargo test" and "tail".
    assert_eq!(patterns.len(), 2);
    let has_display_name = |name: &str| patterns.iter().any(|cp| cp.display_name == name);
    assert!(has_display_name("cargo test"));
    assert!(has_display_name("tail"));

    // Each pattern is a regex anchored at the start of a command.
    let has_regex = |pattern: &str| patterns.iter().any(|cp| cp.pattern == pattern);
    assert!(has_regex("^cargo\\s+test(\\s|$)"));
    assert!(has_regex("^tail\\b"));
}
1269
#[test]
fn test_permission_options_terminal_pipeline_with_chaining() {
    // "&&" chaining combined with a pipe: each distinct command — including
    // distinct subcommands of the same binary — gets its own pattern.
    let permission_options = ToolPermissionContext::new(
        TerminalTool::NAME,
        vec!["npm install && npm test | tail".to_string()],
    )
    .build_permission_options();

    let PermissionOptions::DropdownWithPatterns { patterns, .. } = permission_options else {
        panic!("Expected DropdownWithPatterns for chained pipeline command");
    };

    // Subcommand-aware patterns keep "npm install" and "npm test" distinct.
    assert_eq!(patterns.len(), 3);
    let has_display_name = |name: &str| patterns.iter().any(|cp| cp.display_name == name);
    assert!(has_display_name("npm install"));
    assert!(has_display_name("npm test"));
    assert!(has_display_name("tail"));
}
1289
// E2E test (runs against a real Sonnet 4 model; ignored unless the "e2e"
// feature is enabled): the agent should issue two delay-tool calls within a
// single turn and then summarize their outputs once both complete.
#[gpui::test]
#[cfg_attr(not(feature = "e2e"), ignore)]
async fn test_concurrent_tool_calls(cx: &mut TestAppContext) {
    let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;

    // Test concurrent tool calls with different delay times
    let events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(DelayTool);
            thread.send(
                UserMessageId::new(),
                [
                    "Call the delay tool twice in the same message.",
                    "Once with 100ms. Once with 300ms.",
                    "When both timers are complete, describe the outputs.",
                ],
                cx,
            )
        })
        .unwrap()
        .collect()
        .await;

    // The turn should end normally (no cancellation or refusal).
    let stop_reasons = stop_events(events);
    assert_eq!(stop_reasons, vec![acp::StopReason::EndTurn]);

    thread.update(cx, |thread, _cx| {
        let last_message = thread.last_received_or_pending_message().unwrap();
        let agent_message = last_message.as_agent_message().unwrap();
        // Concatenate only the text chunks of the agent's final message.
        let text = agent_message
            .content
            .iter()
            .filter_map(|content| {
                if let AgentMessageContent::Text(text) = content {
                    Some(text.as_str())
                } else {
                    None
                }
            })
            .collect::<String>();

        // "Ding" is presumably part of DelayTool's output that the model is
        // expected to echo back — TODO confirm against DelayTool in test_tools.
        assert!(text.contains("Ding"));
    });
}
1334
// Verifies that agent profiles filter which tools are exposed to the model:
// switching profiles between sends changes the tool list in the next
// completion request.
#[gpui::test]
async fn test_profiles(cx: &mut TestAppContext) {
    let ThreadTest {
        model, thread, fs, ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Register all three tools on the thread; profiles decide which subset
    // actually reaches the model.
    thread.update(cx, |thread, _cx| {
        thread.add_tool(DelayTool);
        thread.add_tool(EchoTool);
        thread.add_tool(InfiniteTool);
    });

    // Override profiles and wait for settings to be loaded.
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "profiles": {
                    "test-1": {
                        "name": "Test Profile 1",
                        "tools": {
                            EchoTool::NAME: true,
                            DelayTool::NAME: true,
                        }
                    },
                    "test-2": {
                        "name": "Test Profile 2",
                        "tools": {
                            InfiniteTool::NAME: true,
                        }
                    }
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;
    cx.run_until_parked();

    // Test that test-1 profile (default) has echo and delay tools
    thread
        .update(cx, |thread, cx| {
            thread.set_profile(AgentProfileId("test-1".into()), cx);
            thread.send(UserMessageId::new(), ["test"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    let mut pending_completions = fake_model.pending_completions();
    assert_eq!(pending_completions.len(), 1);
    let completion = pending_completions.pop().unwrap();
    let tool_names: Vec<String> = completion
        .tools
        .iter()
        .map(|tool| tool.name.clone())
        .collect();
    // Note: the expected order (delay, echo) is alphabetical.
    assert_eq!(tool_names, vec![DelayTool::NAME, EchoTool::NAME]);
    fake_model.end_last_completion_stream();

    // Switch to test-2 profile, and verify that it has only the infinite tool.
    thread
        .update(cx, |thread, cx| {
            thread.set_profile(AgentProfileId("test-2".into()), cx);
            thread.send(UserMessageId::new(), ["test2"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    let mut pending_completions = fake_model.pending_completions();
    assert_eq!(pending_completions.len(), 1);
    let completion = pending_completions.pop().unwrap();
    let tool_names: Vec<String> = completion
        .tools
        .iter()
        .map(|tool| tool.name.clone())
        .collect();
    assert_eq!(tool_names, vec![InfiniteTool::NAME]);
}
1414
// Verifies MCP (context-server) tool integration: an MCP tool is exposed under
// its bare name when unique, is renamed with a server prefix when it collides
// with a native tool, and its results are recorded under the correct names.
#[gpui::test]
async fn test_mcp_tools(cx: &mut TestAppContext) {
    let ThreadTest {
        model,
        thread,
        context_server_store,
        fs,
        ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Override profiles and wait for settings to be loaded.
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "tool_permissions": { "default": "allow" },
                "profiles": {
                    "test": {
                        "name": "Test Profile",
                        "enable_all_context_servers": true,
                        "tools": {
                            EchoTool::NAME: true,
                        }
                    },
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;
    cx.run_until_parked();
    thread.update(cx, |thread, cx| {
        thread.set_profile(AgentProfileId("test".into()), cx)
    });

    // Start a fake context server exposing an "echo" tool whose schema
    // mirrors the native EchoTool's.
    let mut mcp_tool_calls = setup_context_server(
        "test_server",
        vec![context_server::types::Tool {
            name: "echo".into(),
            description: None,
            input_schema: serde_json::to_value(EchoTool::input_schema(
                LanguageModelToolSchemaFormat::JsonSchema,
            ))
            .unwrap(),
            output_schema: None,
            annotations: None,
        }],
        &context_server_store,
        cx,
    );

    let events = thread.update(cx, |thread, cx| {
        thread.send(UserMessageId::new(), ["Hey"], cx).unwrap()
    });
    cx.run_until_parked();

    // Simulate the model calling the MCP tool.
    let completion = fake_model.pending_completions().pop().unwrap();
    // With no native echo registered yet, the MCP tool keeps its bare name.
    assert_eq!(tool_names_for_completion(&completion), vec!["echo"]);
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_1".into(),
            name: "echo".into(),
            raw_input: json!({"text": "test"}).to_string(),
            input: json!({"text": "test"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // The fake context server receives the call; reply so the turn continues.
    let (tool_call_params, tool_call_response) = mcp_tool_calls.next().await.unwrap();
    assert_eq!(tool_call_params.name, "echo");
    assert_eq!(tool_call_params.arguments, Some(json!({"text": "test"})));
    tool_call_response
        .send(context_server::types::CallToolResponse {
            content: vec![context_server::types::ToolResponseContent::Text {
                text: "test".into(),
            }],
            is_error: None,
            meta: None,
            structured_content: None,
        })
        .unwrap();
    cx.run_until_parked();

    // NOTE(review): this re-asserts on the `completion` popped before the tool
    // call rather than a freshly popped pending completion — possibly intended
    // to inspect the follow-up request; verify.
    assert_eq!(tool_names_for_completion(&completion), vec!["echo"]);
    fake_model.send_last_completion_stream_text_chunk("Done!");
    fake_model.end_last_completion_stream();
    events.collect::<Vec<_>>().await;

    // Send again after adding the echo tool, ensuring the name collision is resolved.
    let events = thread.update(cx, |thread, cx| {
        thread.add_tool(EchoTool);
        thread.send(UserMessageId::new(), ["Go"], cx).unwrap()
    });
    cx.run_until_parked();
    let completion = fake_model.pending_completions().pop().unwrap();
    // The MCP tool is now disambiguated with its server-name prefix.
    assert_eq!(
        tool_names_for_completion(&completion),
        vec!["echo", "test_server_echo"]
    );
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_2".into(),
            name: "test_server_echo".into(),
            raw_input: json!({"text": "mcp"}).to_string(),
            input: json!({"text": "mcp"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_3".into(),
            name: "echo".into(),
            raw_input: json!({"text": "native"}).to_string(),
            input: json!({"text": "native"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Only the MCP-routed call ("test_server_echo") reaches the context
    // server; the native echo runs in-process.
    let (tool_call_params, tool_call_response) = mcp_tool_calls.next().await.unwrap();
    assert_eq!(tool_call_params.name, "echo");
    assert_eq!(tool_call_params.arguments, Some(json!({"text": "mcp"})));
    tool_call_response
        .send(context_server::types::CallToolResponse {
            content: vec![context_server::types::ToolResponseContent::Text { text: "mcp".into() }],
            is_error: None,
            meta: None,
            structured_content: None,
        })
        .unwrap();
    cx.run_until_parked();

    // Ensure the tool results were inserted with the correct names.
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        completion.messages.last().unwrap().content,
        vec![
            MessageContent::ToolResult(LanguageModelToolResult {
                tool_use_id: "tool_3".into(),
                tool_name: "echo".into(),
                is_error: false,
                content: "native".into(),
                output: Some("native".into()),
            },),
            MessageContent::ToolResult(LanguageModelToolResult {
                tool_use_id: "tool_2".into(),
                tool_name: "test_server_echo".into(),
                is_error: false,
                content: "mcp".into(),
                output: Some("mcp".into()),
            },),
        ]
    );
    fake_model.end_last_completion_stream();
    events.collect::<Vec<_>>().await;
}
1580
// Regression test: a saved MCP tool result must still be replayed (with its
// output and completed status) after the MCP server has disconnected, as
// happens when a saved thread is loaded after an app restart.
#[gpui::test]
async fn test_mcp_tool_result_displayed_when_server_disconnected(cx: &mut TestAppContext) {
    let ThreadTest {
        model,
        thread,
        context_server_store,
        fs,
        ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Setup settings to allow MCP tools
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "always_allow_tool_actions": true,
                "profiles": {
                    "test": {
                        "name": "Test Profile",
                        "enable_all_context_servers": true,
                        "tools": {}
                    },
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;
    cx.run_until_parked();
    thread.update(cx, |thread, cx| {
        thread.set_profile(AgentProfileId("test".into()), cx)
    });

    // Setup a context server with a tool
    let mut mcp_tool_calls = setup_context_server(
        "github_server",
        vec![context_server::types::Tool {
            name: "issue_read".into(),
            description: Some("Read a GitHub issue".into()),
            input_schema: json!({
                "type": "object",
                "properties": {
                    "issue_url": { "type": "string" }
                }
            }),
            output_schema: None,
            annotations: None,
        }],
        &context_server_store,
        cx,
    );

    // Send a message and have the model call the MCP tool
    let events = thread.update(cx, |thread, cx| {
        thread
            .send(UserMessageId::new(), ["Read issue #47404"], cx)
            .unwrap()
    });
    cx.run_until_parked();

    // Verify the MCP tool is available to the model
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        tool_names_for_completion(&completion),
        vec!["issue_read"],
        "MCP tool should be available"
    );

    // Simulate the model calling the MCP tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_1".into(),
            name: "issue_read".into(),
            raw_input: json!({"issue_url": "https://github.com/zed-industries/zed/issues/47404"})
                .to_string(),
            input: json!({"issue_url": "https://github.com/zed-industries/zed/issues/47404"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // The MCP server receives the tool call and responds with content
    let expected_tool_output = "Issue #47404: Tool call results are cleared upon app close";
    let (tool_call_params, tool_call_response) = mcp_tool_calls.next().await.unwrap();
    assert_eq!(tool_call_params.name, "issue_read");
    tool_call_response
        .send(context_server::types::CallToolResponse {
            content: vec![context_server::types::ToolResponseContent::Text {
                text: expected_tool_output.into(),
            }],
            is_error: None,
            meta: None,
            structured_content: None,
        })
        .unwrap();
    cx.run_until_parked();

    // After tool completes, the model continues with a new completion request
    // that includes the tool results. We need to respond to this.
    let _completion = fake_model.pending_completions().pop().unwrap();
    fake_model.send_last_completion_stream_text_chunk("I found the issue!");
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    fake_model.end_last_completion_stream();
    events.collect::<Vec<_>>().await;

    // Verify the tool result is stored in the thread by checking the markdown output.
    // The tool result is in the first assistant message (not the last one, which is
    // the model's response after the tool completed).
    thread.update(cx, |thread, _cx| {
        let markdown = thread.to_markdown();
        assert!(
            markdown.contains("**Tool Result**: issue_read"),
            "Thread should contain tool result header"
        );
        assert!(
            markdown.contains(expected_tool_output),
            "Thread should contain tool output: {}",
            expected_tool_output
        );
    });

    // Simulate app restart: disconnect the MCP server.
    // After restart, the MCP server won't be connected yet when the thread is replayed.
    context_server_store.update(cx, |store, cx| {
        let _ = store.stop_server(&ContextServerId("github_server".into()), cx);
    });
    cx.run_until_parked();

    // Replay the thread (this is what happens when loading a saved thread)
    let mut replay_events = thread.update(cx, |thread, cx| thread.replay(cx));

    // Scan the replayed event stream for the original tool call and for an
    // update carrying its saved raw_output.
    let mut found_tool_call = None;
    let mut found_tool_call_update_with_output = None;

    while let Some(event) = replay_events.next().await {
        let event = event.unwrap();
        match &event {
            ThreadEvent::ToolCall(tc) if tc.tool_call_id.to_string() == "tool_1" => {
                found_tool_call = Some(tc.clone());
            }
            ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(update))
                if update.tool_call_id.to_string() == "tool_1" =>
            {
                if update.fields.raw_output.is_some() {
                    found_tool_call_update_with_output = Some(update.clone());
                }
            }
            _ => {}
        }
    }

    // The tool call should be found
    assert!(
        found_tool_call.is_some(),
        "Tool call should be emitted during replay"
    );

    assert!(
        found_tool_call_update_with_output.is_some(),
        "ToolCallUpdate with raw_output should be emitted even when MCP server is disconnected."
    );

    let update = found_tool_call_update_with_output.unwrap();
    assert_eq!(
        update.fields.raw_output,
        Some(expected_tool_output.into()),
        "raw_output should contain the saved tool result"
    );

    // Also verify the status is correct (completed, not failed)
    assert_eq!(
        update.fields.status,
        Some(acp::ToolCallStatus::Completed),
        "Tool call status should reflect the original completion status"
    );
}
1762
// Verifies MCP tool name disambiguation and truncation: colliding names get a
// server prefix (snake_cased when the server name has spaces), and names at or
// over MAX_TOOL_NAME_LENGTH are shortened — the final expected list shows
// prefixed forms like "y_aaa…" / "z_aaa…" and a bare truncated "ccc…".
#[gpui::test]
async fn test_mcp_tool_truncation(cx: &mut TestAppContext) {
    let ThreadTest {
        model,
        thread,
        context_server_store,
        fs,
        ..
    } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Set up a profile with all tools enabled
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "profiles": {
                    "test": {
                        "name": "Test Profile",
                        "enable_all_context_servers": true,
                        "tools": {
                            EchoTool::NAME: true,
                            DelayTool::NAME: true,
                            WordListTool::NAME: true,
                            ToolRequiringPermission::NAME: true,
                            InfiniteTool::NAME: true,
                        }
                    },
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;
    cx.run_until_parked();

    thread.update(cx, |thread, cx| {
        thread.set_profile(AgentProfileId("test".into()), cx);
        thread.add_tool(EchoTool);
        thread.add_tool(DelayTool);
        thread.add_tool(WordListTool);
        thread.add_tool(ToolRequiringPermission);
        thread.add_tool(InfiniteTool);
    });

    // Set up multiple context servers with some overlapping tool names
    let _server1_calls = setup_context_server(
        "xxx",
        vec![
            context_server::types::Tool {
                name: "echo".into(), // Conflicts with native EchoTool
                description: None,
                input_schema: serde_json::to_value(EchoTool::input_schema(
                    LanguageModelToolSchemaFormat::JsonSchema,
                ))
                .unwrap(),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "unique_tool_1".into(),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
        ],
        &context_server_store,
        cx,
    );

    let _server2_calls = setup_context_server(
        "yyy",
        vec![
            context_server::types::Tool {
                name: "echo".into(), // Also conflicts with native EchoTool
                description: None,
                input_schema: serde_json::to_value(EchoTool::input_schema(
                    LanguageModelToolSchemaFormat::JsonSchema,
                ))
                .unwrap(),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "unique_tool_2".into(),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            // Near-limit names, duplicated across servers yyy and zzz below,
            // to exercise the truncate-and-prefix path.
            context_server::types::Tool {
                name: "a".repeat(MAX_TOOL_NAME_LENGTH - 2),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "b".repeat(MAX_TOOL_NAME_LENGTH - 1),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
        ],
        &context_server_store,
        cx,
    );
    let _server3_calls = setup_context_server(
        "zzz",
        vec![
            context_server::types::Tool {
                name: "a".repeat(MAX_TOOL_NAME_LENGTH - 2),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            context_server::types::Tool {
                name: "b".repeat(MAX_TOOL_NAME_LENGTH - 1),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
            // Over-limit name; expected to be truncated in the final list.
            context_server::types::Tool {
                name: "c".repeat(MAX_TOOL_NAME_LENGTH + 1),
                description: None,
                input_schema: json!({"type": "object", "properties": {}}),
                output_schema: None,
                annotations: None,
            },
        ],
        &context_server_store,
        cx,
    );

    // Server with spaces in name - tests snake_case conversion for API compatibility
    let _server4_calls = setup_context_server(
        "Azure DevOps",
        vec![context_server::types::Tool {
            name: "echo".into(), // Also conflicts - will be disambiguated as azure_dev_ops_echo
            description: None,
            input_schema: serde_json::to_value(EchoTool::input_schema(
                LanguageModelToolSchemaFormat::JsonSchema,
            ))
            .unwrap(),
            output_schema: None,
            annotations: None,
        }],
        &context_server_store,
        cx,
    );

    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Go"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    // The completion's tool list is the full, sorted, disambiguated set.
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        tool_names_for_completion(&completion),
        vec![
            "azure_dev_ops_echo",
            "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
            "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
            "delay",
            "echo",
            "infinite",
            "tool_requiring_permission",
            "unique_tool_1",
            "unique_tool_2",
            "word_list",
            "xxx_echo",
            "y_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "yyy_echo",
            "z_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
        ]
    );
}
1946
// E2E test (real Sonnet 4 model): cancelling mid-turn while a tool is still
// running must close the event stream with a Cancelled stop reason, and the
// thread must remain usable for a subsequent send.
#[gpui::test]
#[cfg_attr(not(feature = "e2e"), ignore)]
async fn test_cancellation(cx: &mut TestAppContext) {
    let ThreadTest { thread, .. } = setup(cx, TestModel::Sonnet4).await;

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(InfiniteTool);
            thread.add_tool(EchoTool);
            thread.send(
                UserMessageId::new(),
                ["Call the echo tool, then call the infinite tool, then explain their output"],
                cx,
            )
        })
        .unwrap();

    // Wait until both tools are called.
    // Track the echo tool's id so we can wait for its completion; the
    // infinite tool never completes on its own.
    let mut expected_tools = vec!["Echo", "Infinite Tool"];
    let mut echo_id = None;
    let mut echo_completed = false;
    while let Some(event) = events.next().await {
        match event.unwrap() {
            ThreadEvent::ToolCall(tool_call) => {
                // Tool calls must arrive in the order the prompt requested.
                assert_eq!(tool_call.title, expected_tools.remove(0));
                if tool_call.title == "Echo" {
                    echo_id = Some(tool_call.tool_call_id);
                }
            }
            ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
                acp::ToolCallUpdate {
                    tool_call_id,
                    fields:
                        acp::ToolCallUpdateFields {
                            status: Some(acp::ToolCallStatus::Completed),
                            ..
                        },
                    ..
                },
            )) if Some(&tool_call_id) == echo_id.as_ref() => {
                echo_completed = true;
            }
            _ => {}
        }

        if expected_tools.is_empty() && echo_completed {
            break;
        }
    }

    // Cancel the current send and ensure that the event stream is closed, even
    // if one of the tools is still running.
    thread.update(cx, |thread, cx| thread.cancel(cx)).await;
    let events = events.collect::<Vec<_>>().await;
    let last_event = events.last();
    assert!(
        matches!(
            last_event,
            Some(Ok(ThreadEvent::Stop(acp::StopReason::Cancelled)))
        ),
        "unexpected event {last_event:?}"
    );

    // Ensure we can still send a new message after cancellation.
    let events = thread
        .update(cx, |thread, cx| {
            thread.send(
                UserMessageId::new(),
                ["Testing: reply with 'Hello' then stop."],
                cx,
            )
        })
        .unwrap()
        .collect::<Vec<_>>()
        .await;
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();
        assert_eq!(
            agent_message.content,
            vec![AgentMessageContent::Text("Hello".to_string())]
        );
    });
    assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
}
2032
// Verifies that cancelling a running terminal tool kills the terminal and
// records a tool result containing the partial output plus a
// user-stopped-this-command note, rather than a bare cancellation message.
#[gpui::test]
async fn test_terminal_tool_cancellation_captures_output(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // A fake terminal whose process never exits, so only cancellation can
    // end the tool call.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(UserMessageId::new(), ["run a command"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Cancel the thread while the terminal is running
    thread.update(cx, |thread, cx| thread.cancel(cx)).detach();

    // Collect remaining events, driving the executor to let cancellation complete
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify the terminal was killed
    assert!(
        handle.was_killed(),
        "expected terminal handle to be killed on cancellation"
    );

    // Verify we got a cancellation stop event
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::Cancelled],
    );

    // Verify the tool result contains the terminal output, not just "Tool canceled by user"
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();

        let tool_use = agent_message
            .content
            .iter()
            .find_map(|content| match content {
                AgentMessageContent::ToolUse(tool_use) => Some(tool_use),
                _ => None,
            })
            .expect("expected tool use in agent message");

        let tool_result = agent_message
            .tool_results
            .get(&tool_use.id)
            .expect("expected tool result");

        let result_text = match &tool_result.content {
            language_model::LanguageModelToolResultContent::Text(text) => text.to_string(),
            _ => panic!("expected text content in tool result"),
        };

        // "partial output" comes from FakeTerminalHandle's output field
        assert!(
            result_text.contains("partial output"),
            "expected tool result to contain terminal output, got: {result_text}"
        );
        // Match the actual format from process_content in terminal_tool.rs
        assert!(
            result_text.contains("The user stopped this command"),
            "expected tool result to indicate user stopped, got: {result_text}"
        );
    });

    // Verify we can send a new message after cancellation
    verify_thread_recovery(&thread, &fake_model, cx).await;
}
2129
#[gpui::test]
async fn test_cancellation_aware_tool_responds_to_cancellation(cx: &mut TestAppContext) {
    // This test verifies that tools which properly handle cancellation via
    // `event_stream.cancelled_by_user()` (like edit_file_tool) respond promptly
    // to cancellation and report that they were cancelled.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // `was_cancelled` is a shared flag the tool sets when it observes cancellation.
    let (tool, was_cancelled) = CancellationAwareTool::new();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(tool);
            thread.send(
                UserMessageId::new(),
                ["call the cancellation aware tool"],
                cx,
            )
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the cancellation-aware tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "cancellation_aware_1".into(),
            name: "cancellation_aware".into(),
            raw_input: r#"{}"#.into(),
            input: json!({}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    cx.run_until_parked();

    // Wait for the tool call to be reported
    let mut tool_started = false;
    // Polling budget scaled with CPU count; each pass drains all ready events
    // and then sleeps briefly so background tasks can make progress.
    let deadline = cx.executor().num_cpus() * 100;
    for _ in 0..deadline {
        cx.run_until_parked();

        while let Some(Some(event)) = events.next().now_or_never() {
            if let Ok(ThreadEvent::ToolCall(tool_call)) = &event {
                if tool_call.title == "Cancellation Aware Tool" {
                    tool_started = true;
                    break;
                }
            }
        }

        if tool_started {
            break;
        }

        cx.background_executor
            .timer(Duration::from_millis(10))
            .await;
    }
    assert!(tool_started, "expected cancellation aware tool to start");

    // Cancel the thread and wait for it to complete
    let cancel_task = thread.update(cx, |thread, cx| thread.cancel(cx));

    // The cancel task should complete promptly because the tool handles cancellation
    let timeout = cx.background_executor.timer(Duration::from_secs(5));
    futures::select! {
        _ = cancel_task.fuse() => {}
        _ = timeout.fuse() => {
            panic!("cancel task timed out - tool did not respond to cancellation");
        }
    }

    // Verify the tool detected cancellation via its flag
    assert!(
        was_cancelled.load(std::sync::atomic::Ordering::SeqCst),
        "tool should have detected cancellation via event_stream.cancelled_by_user()"
    );

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify we got a cancellation stop event
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::Cancelled],
    );

    // Verify we can send a new message after cancellation
    verify_thread_recovery(&thread, &fake_model, cx).await;
}
2224
2225/// Helper to verify thread can recover after cancellation by sending a simple message.
2226async fn verify_thread_recovery(
2227 thread: &Entity<Thread>,
2228 fake_model: &FakeLanguageModel,
2229 cx: &mut TestAppContext,
2230) {
2231 let events = thread
2232 .update(cx, |thread, cx| {
2233 thread.send(
2234 UserMessageId::new(),
2235 ["Testing: reply with 'Hello' then stop."],
2236 cx,
2237 )
2238 })
2239 .unwrap();
2240 cx.run_until_parked();
2241 fake_model.send_last_completion_stream_text_chunk("Hello");
2242 fake_model
2243 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2244 fake_model.end_last_completion_stream();
2245
2246 let events = events.collect::<Vec<_>>().await;
2247 thread.update(cx, |thread, _cx| {
2248 let message = thread.last_received_or_pending_message().unwrap();
2249 let agent_message = message.as_agent_message().unwrap();
2250 assert_eq!(
2251 agent_message.content,
2252 vec![AgentMessageContent::Text("Hello".to_string())]
2253 );
2254 });
2255 assert_eq!(stop_events(events), vec![acp::StopReason::EndTurn]);
2256}
2257
2258/// Waits for a terminal tool to start by watching for a ToolCallUpdate with terminal content.
2259async fn wait_for_terminal_tool_started(
2260 events: &mut mpsc::UnboundedReceiver<Result<ThreadEvent>>,
2261 cx: &mut TestAppContext,
2262) {
2263 let deadline = cx.executor().num_cpus() * 100; // Scale with available parallelism
2264 for _ in 0..deadline {
2265 cx.run_until_parked();
2266
2267 while let Some(Some(event)) = events.next().now_or_never() {
2268 if let Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
2269 update,
2270 ))) = &event
2271 {
2272 if update.fields.content.as_ref().is_some_and(|content| {
2273 content
2274 .iter()
2275 .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
2276 }) {
2277 return;
2278 }
2279 }
2280 }
2281
2282 cx.background_executor
2283 .timer(Duration::from_millis(10))
2284 .await;
2285 }
2286 panic!("terminal tool did not start within the expected time");
2287}
2288
2289/// Collects events until a Stop event is received, driving the executor to completion.
2290async fn collect_events_until_stop(
2291 events: &mut mpsc::UnboundedReceiver<Result<ThreadEvent>>,
2292 cx: &mut TestAppContext,
2293) -> Vec<Result<ThreadEvent>> {
2294 let mut collected = Vec::new();
2295 let deadline = cx.executor().num_cpus() * 200;
2296
2297 for _ in 0..deadline {
2298 cx.executor().advance_clock(Duration::from_millis(10));
2299 cx.run_until_parked();
2300
2301 while let Some(Some(event)) = events.next().now_or_never() {
2302 let is_stop = matches!(&event, Ok(ThreadEvent::Stop(_)));
2303 collected.push(event);
2304 if is_stop {
2305 return collected;
2306 }
2307 }
2308 }
2309 panic!(
2310 "did not receive Stop event within the expected time; collected {} events",
2311 collected.len()
2312 );
2313}
2314
#[gpui::test]
async fn test_truncate_while_terminal_tool_running(cx: &mut TestAppContext) {
    // Truncating the thread at the message that triggered a running terminal
    // tool should kill the terminal, leave the thread empty, and still allow
    // new messages afterwards.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // Terminal that never exits on its own, so it is still running when we truncate.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    // Keep the message id so we can truncate at this exact message later.
    let message_id = UserMessageId::new();
    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(message_id.clone(), ["run a command"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Truncate the thread while the terminal is running
    thread
        .update(cx, |thread, cx| thread.truncate(message_id, cx))
        .unwrap();

    // Drive the executor to let cancellation complete
    let _ = collect_events_until_stop(&mut events, cx).await;

    // Verify the terminal was killed
    assert!(
        handle.was_killed(),
        "expected terminal handle to be killed on truncate"
    );

    // Verify the thread is empty after truncation
    thread.update(cx, |thread, _cx| {
        assert_eq!(
            thread.to_markdown(),
            "",
            "expected thread to be empty after truncating the only message"
        );
    });

    // Verify we can send a new message after truncation
    verify_thread_recovery(&thread, &fake_model, cx).await;
}
2381
#[gpui::test]
async fn test_cancel_multiple_concurrent_terminal_tools(cx: &mut TestAppContext) {
    // Tests that cancellation properly kills all running terminal tools when multiple are active.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // Environment that hands out a separate terminal handle per tool invocation;
    // `environment.handles()` below returns the handles it created.
    let environment = Rc::new(MultiTerminalEnvironment::new());

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment.clone(),
            ));
            thread.send(UserMessageId::new(), ["run multiple commands"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling two terminal tools
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_2".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 2000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 2000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for both terminal tools to start by counting terminal content updates
    let mut terminals_started = 0;
    // Polling budget scaled with CPU count; each pass drains ready events then
    // sleeps briefly so background tasks can progress.
    let deadline = cx.executor().num_cpus() * 100;
    for _ in 0..deadline {
        cx.run_until_parked();

        while let Some(Some(event)) = events.next().now_or_never() {
            if let Ok(ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
                update,
            ))) = &event
            {
                // Each UpdateFields event carrying terminal content counts as
                // one terminal having started.
                if update.fields.content.as_ref().is_some_and(|content| {
                    content
                        .iter()
                        .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
                }) {
                    terminals_started += 1;
                    if terminals_started >= 2 {
                        break;
                    }
                }
            }
        }
        if terminals_started >= 2 {
            break;
        }

        cx.background_executor
            .timer(Duration::from_millis(10))
            .await;
    }
    assert!(
        terminals_started >= 2,
        "expected 2 terminal tools to start, got {terminals_started}"
    );

    // Cancel the thread while both terminals are running
    thread.update(cx, |thread, cx| thread.cancel(cx)).detach();

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify both terminal handles were killed
    let handles = environment.handles();
    assert_eq!(
        handles.len(),
        2,
        "expected 2 terminal handles to be created"
    );
    assert!(
        handles[0].was_killed(),
        "expected first terminal handle to be killed on cancellation"
    );
    assert!(
        handles[1].was_killed(),
        "expected second terminal handle to be killed on cancellation"
    );

    // Verify we got a cancellation stop event
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::Cancelled],
    );
}
2490
#[gpui::test]
async fn test_terminal_tool_stopped_via_terminal_card_button(cx: &mut TestAppContext) {
    // Tests that clicking the stop button on the terminal card (as opposed to the main
    // cancel button) properly reports user stopped via the was_stopped_by_user path.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // Terminal that never exits on its own; we will stop it manually below.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(UserMessageId::new(), ["run a command"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": "."}"#.into(),
            input: json!({"command": "sleep 1000", "cd": "."}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Simulate user clicking stop on the terminal card itself.
    // This sets the flag and signals exit (simulating what the real UI would do).
    handle.set_stopped_by_user(true);
    handle.killed.store(true, Ordering::SeqCst);
    handle.signal_exit();

    // Wait for the tool to complete
    cx.run_until_parked();

    // The thread continues after tool completion - simulate the model ending its turn
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    fake_model.end_last_completion_stream();

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify we got an EndTurn (not Cancelled, since we didn't cancel the thread)
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::EndTurn],
    );

    // Verify the tool result indicates user stopped
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();

        // Find the recorded tool use and its stored result.
        let tool_use = agent_message
            .content
            .iter()
            .find_map(|content| match content {
                AgentMessageContent::ToolUse(tool_use) => Some(tool_use),
                _ => None,
            })
            .expect("expected tool use in agent message");

        let tool_result = agent_message
            .tool_results
            .get(&tool_use.id)
            .expect("expected tool result");

        let result_text = match &tool_result.content {
            language_model::LanguageModelToolResultContent::Text(text) => text.to_string(),
            _ => panic!("expected text content in tool result"),
        };

        assert!(
            result_text.contains("The user stopped this command"),
            "expected tool result to indicate user stopped, got: {result_text}"
        );
    });
}
2585
#[gpui::test]
async fn test_terminal_tool_timeout_expires(cx: &mut TestAppContext) {
    // Tests that when a timeout is configured and expires, the tool result indicates timeout.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    always_allow_tools(cx);
    let fake_model = model.as_fake();

    // Terminal that never exits on its own, guaranteeing the timeout fires first.
    let environment = Rc::new(cx.update(|cx| {
        FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
    }));
    let handle = environment.terminal_handle.clone().unwrap();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(crate::TerminalTool::new(
                thread.project().clone(),
                environment,
            ));
            thread.send(UserMessageId::new(), ["run a command with timeout"], cx)
        })
        .unwrap();

    cx.run_until_parked();

    // Simulate the model calling the terminal tool with a short timeout
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "terminal_tool_1".into(),
            name: TerminalTool::NAME.into(),
            raw_input: r#"{"command": "sleep 1000", "cd": ".", "timeout_ms": 100}"#.into(),
            input: json!({"command": "sleep 1000", "cd": ".", "timeout_ms": 100}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();

    // Wait for the terminal tool to start running
    wait_for_terminal_tool_started(&mut events, cx).await;

    // Advance clock past the timeout (200ms > the 100ms timeout_ms above)
    cx.executor().advance_clock(Duration::from_millis(200));
    cx.run_until_parked();

    // The thread continues after tool completion - simulate the model ending its turn
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    fake_model.end_last_completion_stream();

    // Collect remaining events
    let remaining_events = collect_events_until_stop(&mut events, cx).await;

    // Verify the terminal was killed due to timeout
    assert!(
        handle.was_killed(),
        "expected terminal handle to be killed on timeout"
    );

    // Verify we got an EndTurn (the tool completed, just with timeout)
    assert_eq!(
        stop_events(remaining_events),
        vec![acp::StopReason::EndTurn],
    );

    // Verify the tool result indicates timeout, not user stopped
    thread.update(cx, |thread, _cx| {
        let message = thread.last_received_or_pending_message().unwrap();
        let agent_message = message.as_agent_message().unwrap();

        // Find the recorded tool use and its stored result.
        let tool_use = agent_message
            .content
            .iter()
            .find_map(|content| match content {
                AgentMessageContent::ToolUse(tool_use) => Some(tool_use),
                _ => None,
            })
            .expect("expected tool use in agent message");

        let tool_result = agent_message
            .tool_results
            .get(&tool_use.id)
            .expect("expected tool result");

        let result_text = match &tool_result.content {
            language_model::LanguageModelToolResultContent::Text(text) => text.to_string(),
            _ => panic!("expected text content in tool result"),
        };

        assert!(
            result_text.contains("timed out"),
            "expected tool result to indicate timeout, got: {result_text}"
        );
        assert!(
            !result_text.contains("The user stopped"),
            "tool result should not mention user stopped when it timed out, got: {result_text}"
        );
    });
}
2684
2685#[gpui::test]
2686async fn test_in_progress_send_canceled_by_next_send(cx: &mut TestAppContext) {
2687 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
2688 let fake_model = model.as_fake();
2689
2690 let events_1 = thread
2691 .update(cx, |thread, cx| {
2692 thread.send(UserMessageId::new(), ["Hello 1"], cx)
2693 })
2694 .unwrap();
2695 cx.run_until_parked();
2696 fake_model.send_last_completion_stream_text_chunk("Hey 1!");
2697 cx.run_until_parked();
2698
2699 let events_2 = thread
2700 .update(cx, |thread, cx| {
2701 thread.send(UserMessageId::new(), ["Hello 2"], cx)
2702 })
2703 .unwrap();
2704 cx.run_until_parked();
2705 fake_model.send_last_completion_stream_text_chunk("Hey 2!");
2706 fake_model
2707 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2708 fake_model.end_last_completion_stream();
2709
2710 let events_1 = events_1.collect::<Vec<_>>().await;
2711 assert_eq!(stop_events(events_1), vec![acp::StopReason::Cancelled]);
2712 let events_2 = events_2.collect::<Vec<_>>().await;
2713 assert_eq!(stop_events(events_2), vec![acp::StopReason::EndTurn]);
2714}
2715
#[gpui::test]
async fn test_retry_cancelled_promptly_on_new_send(cx: &mut TestAppContext) {
    // Regression test: when a completion fails with a retryable error (e.g. upstream 500),
    // the retry loop waits on a timer. If the user switches models and sends a new message
    // during that delay, the old turn should exit immediately instead of retrying with the
    // stale model.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let model_a = model.as_fake();

    // Start a turn with model_a.
    let events_1 = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    assert_eq!(model_a.completion_count(), 1);

    // Model returns a retryable upstream 500. The turn enters the retry delay.
    model_a.send_last_completion_stream_error(
        LanguageModelCompletionError::UpstreamProviderError {
            message: "Internal server error".to_string(),
            status: http_client::StatusCode::INTERNAL_SERVER_ERROR,
            retry_after: None,
        },
    );
    model_a.end_last_completion_stream();
    cx.run_until_parked();

    // The old completion was consumed; model_a has no pending requests yet because the
    // retry timer hasn't fired.
    assert_eq!(model_a.completion_count(), 0);

    // Switch to model_b and send a new message. This cancels the old turn.
    let model_b = Arc::new(FakeLanguageModel::with_id_and_thinking(
        "fake", "model-b", "Model B", false,
    ));
    thread.update(cx, |thread, cx| {
        thread.set_model(model_b.clone(), cx);
    });
    let events_2 = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Continue"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // model_b should have received its completion request.
    assert_eq!(model_b.as_fake().completion_count(), 1);

    // Advance the clock well past the retry delay (BASE_RETRY_DELAY = 5s).
    cx.executor().advance_clock(Duration::from_secs(10));
    cx.run_until_parked();

    // model_a must NOT have received another completion request — the cancelled turn
    // should have exited during the retry delay rather than retrying with the old model.
    assert_eq!(
        model_a.completion_count(),
        0,
        "old model should not receive a retry request after cancellation"
    );

    // Complete model_b's turn.
    model_b
        .as_fake()
        .send_last_completion_stream_text_chunk("Done!");
    model_b
        .as_fake()
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
    model_b.as_fake().end_last_completion_stream();

    // The cancelled turn reports Cancelled; the new turn ends normally.
    let events_1 = events_1.collect::<Vec<_>>().await;
    assert_eq!(stop_events(events_1), vec![acp::StopReason::Cancelled]);

    let events_2 = events_2.collect::<Vec<_>>().await;
    assert_eq!(stop_events(events_2), vec![acp::StopReason::EndTurn]);
}
2793
2794#[gpui::test]
2795async fn test_subsequent_successful_sends_dont_cancel(cx: &mut TestAppContext) {
2796 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
2797 let fake_model = model.as_fake();
2798
2799 let events_1 = thread
2800 .update(cx, |thread, cx| {
2801 thread.send(UserMessageId::new(), ["Hello 1"], cx)
2802 })
2803 .unwrap();
2804 cx.run_until_parked();
2805 fake_model.send_last_completion_stream_text_chunk("Hey 1!");
2806 fake_model
2807 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2808 fake_model.end_last_completion_stream();
2809 let events_1 = events_1.collect::<Vec<_>>().await;
2810
2811 let events_2 = thread
2812 .update(cx, |thread, cx| {
2813 thread.send(UserMessageId::new(), ["Hello 2"], cx)
2814 })
2815 .unwrap();
2816 cx.run_until_parked();
2817 fake_model.send_last_completion_stream_text_chunk("Hey 2!");
2818 fake_model
2819 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::EndTurn));
2820 fake_model.end_last_completion_stream();
2821 let events_2 = events_2.collect::<Vec<_>>().await;
2822
2823 assert_eq!(stop_events(events_1), vec![acp::StopReason::EndTurn]);
2824 assert_eq!(stop_events(events_2), vec![acp::StopReason::EndTurn]);
2825}
2826
2827#[gpui::test]
2828async fn test_refusal(cx: &mut TestAppContext) {
2829 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
2830 let fake_model = model.as_fake();
2831
2832 let events = thread
2833 .update(cx, |thread, cx| {
2834 thread.send(UserMessageId::new(), ["Hello"], cx)
2835 })
2836 .unwrap();
2837 cx.run_until_parked();
2838 thread.read_with(cx, |thread, _| {
2839 assert_eq!(
2840 thread.to_markdown(),
2841 indoc! {"
2842 ## User
2843
2844 Hello
2845 "}
2846 );
2847 });
2848
2849 fake_model.send_last_completion_stream_text_chunk("Hey!");
2850 cx.run_until_parked();
2851 thread.read_with(cx, |thread, _| {
2852 assert_eq!(
2853 thread.to_markdown(),
2854 indoc! {"
2855 ## User
2856
2857 Hello
2858
2859 ## Assistant
2860
2861 Hey!
2862 "}
2863 );
2864 });
2865
2866 // If the model refuses to continue, the thread should remove all the messages after the last user message.
2867 fake_model
2868 .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::Refusal));
2869 let events = events.collect::<Vec<_>>().await;
2870 assert_eq!(stop_events(events), vec![acp::StopReason::Refusal]);
2871 thread.read_with(cx, |thread, _| {
2872 assert_eq!(thread.to_markdown(), "");
2873 });
2874}
2875
#[gpui::test]
async fn test_truncate_first_message(cx: &mut TestAppContext) {
    // Truncating at the first (and only) message should clear the thread and
    // reset token usage, and the thread should accept new messages afterwards.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Keep the message id so we can truncate at this exact message later.
    let message_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(message_id.clone(), ["Hello"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello
            "}
        );
        // No usage has been reported yet.
        assert_eq!(thread.latest_token_usage(), None);
    });

    fake_model.send_last_completion_stream_text_chunk("Hey!");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 32_000,
            output_tokens: 16_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    cx.run_until_parked();
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello

                ## Assistant

                Hey!
            "}
        );
        // Usage now reflects the UsageUpdate streamed above.
        // NOTE(review): max_tokens of 1_000_000 presumably comes from the fake
        // model's context window — confirm against fake_provider.
        assert_eq!(
            thread.latest_token_usage(),
            Some(acp_thread::TokenUsage {
                used_tokens: 32_000 + 16_000,
                max_tokens: 1_000_000,
                max_output_tokens: None,
                input_tokens: 32_000,
                output_tokens: 16_000,
            })
        );
    });

    // Truncating at the first message empties the thread and clears usage.
    thread
        .update(cx, |thread, cx| thread.truncate(message_id, cx))
        .unwrap();
    cx.run_until_parked();
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.to_markdown(), "");
        assert_eq!(thread.latest_token_usage(), None);
    });

    // Ensure we can still send a new message after truncation.
    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hi"], cx)
        })
        .unwrap();
    thread.update(cx, |thread, _cx| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hi
            "}
        );
    });
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Ahoy!");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 40_000,
            output_tokens: 20_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    cx.run_until_parked();
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hi

                ## Assistant

                Ahoy!
            "}
        );

        // Usage for the new turn replaces the cleared usage entirely.
        assert_eq!(
            thread.latest_token_usage(),
            Some(acp_thread::TokenUsage {
                used_tokens: 40_000 + 20_000,
                max_tokens: 1_000_000,
                max_output_tokens: None,
                input_tokens: 40_000,
                output_tokens: 20_000,
            })
        );
    });
}
2997
#[gpui::test]
async fn test_truncate_second_message(cx: &mut TestAppContext) {
    // Truncating at the second message should restore the thread (markdown and
    // token usage) to exactly the state after the first exchange.
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Message 1"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Message 1 response");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 32_000,
            output_tokens: 16_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Snapshot assertion for the post-first-exchange state; reused after
    // truncation to prove the thread was restored exactly.
    let assert_first_message_state = |cx: &mut TestAppContext| {
        thread.clone().read_with(cx, |thread, _| {
            assert_eq!(
                thread.to_markdown(),
                indoc! {"
                    ## User

                    Message 1

                    ## Assistant

                    Message 1 response
                "}
            );

            assert_eq!(
                thread.latest_token_usage(),
                Some(acp_thread::TokenUsage {
                    used_tokens: 32_000 + 16_000,
                    max_tokens: 1_000_000,
                    max_output_tokens: None,
                    input_tokens: 32_000,
                    output_tokens: 16_000,
                })
            );
        });
    };

    assert_first_message_state(cx);

    // Send a second message, keeping its id so we can truncate at it.
    let second_message_id = UserMessageId::new();
    thread
        .update(cx, |thread, cx| {
            thread.send(second_message_id.clone(), ["Message 2"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    fake_model.send_last_completion_stream_text_chunk("Message 2 response");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
        language_model::TokenUsage {
            input_tokens: 40_000,
            output_tokens: 20_000,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Both exchanges are present, and usage reflects the latest update.
    thread.read_with(cx, |thread, _| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Message 1

                ## Assistant

                Message 1 response

                ## User

                Message 2

                ## Assistant

                Message 2 response
            "}
        );

        assert_eq!(
            thread.latest_token_usage(),
            Some(acp_thread::TokenUsage {
                used_tokens: 40_000 + 20_000,
                max_tokens: 1_000_000,
                max_output_tokens: None,
                input_tokens: 40_000,
                output_tokens: 20_000,
            })
        );
    });

    // Truncate at the second message: the thread should revert to the
    // first-exchange snapshot, including the earlier token usage.
    thread
        .update(cx, |thread, cx| thread.truncate(second_message_id, cx))
        .unwrap();
    cx.run_until_parked();

    assert_first_message_state(cx);
}
3112
#[gpui::test]
async fn test_title_generation(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Use a dedicated fake model for summarization so title generation can be
    // driven independently of the main completion stream.
    let summary_model = Arc::new(FakeLanguageModel::default());
    thread.update(cx, |thread, cx| {
        thread.set_summarization_model(Some(summary_model.clone()), cx)
    });

    let send = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    fake_model.send_last_completion_stream_text_chunk("Hey!");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    // No title yet: the summary model hasn't streamed any output.
    thread.read_with(cx, |thread, _| assert_eq!(thread.title(), None));

    // Stream a summary. Only the first line ("Hello world") should become the
    // title, even though the stream continues past the newline.
    summary_model.send_last_completion_stream_text_chunk("Hello ");
    summary_model.send_last_completion_stream_text_chunk("world\nG");
    summary_model.send_last_completion_stream_text_chunk("oodnight Moon");
    summary_model.end_last_completion_stream();
    send.collect::<Vec<_>>().await;
    cx.run_until_parked();
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.title(), Some("Hello world".into()))
    });

    // Send another message, ensuring no title is generated this time.
    let send = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello again"], cx)
        })
        .unwrap();
    cx.run_until_parked();
    fake_model.send_last_completion_stream_text_chunk("Hey again!");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    // The summary model must not have been asked for another completion, and
    // the original title must be preserved.
    assert_eq!(summary_model.pending_completions(), Vec::new());
    send.collect::<Vec<_>>().await;
    thread.read_with(cx, |thread, _| {
        assert_eq!(thread.title(), Some("Hello world".into()))
    });
}
3162
// Verifies that a completion request built mid-turn omits tool uses whose
// results are still pending (here: a tool waiting on user permission) while
// including tool uses that have already completed.
#[gpui::test]
async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) {
    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let _events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(ToolRequiringPermission);
            thread.add_tool(EchoTool);
            thread.send(UserMessageId::new(), ["Hey!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // One tool use that will stay pending (permission is never granted) and
    // one that can run to completion immediately.
    let permission_tool_use = LanguageModelToolUse {
        id: "tool_id_1".into(),
        name: ToolRequiringPermission::NAME.into(),
        raw_input: "{}".into(),
        input: json!({}),
        is_input_complete: true,
        thought_signature: None,
    };
    let echo_tool_use = LanguageModelToolUse {
        id: "tool_id_2".into(),
        name: EchoTool::NAME.into(),
        raw_input: json!({"text": "test"}).to_string(),
        input: json!({"text": "test"}),
        is_input_complete: true,
        thought_signature: None,
    };
    fake_model.send_last_completion_stream_text_chunk("Hi!");
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        permission_tool_use,
    ));
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        echo_tool_use.clone(),
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // Ensure pending tools are skipped when building a request.
    let request = thread
        .read_with(cx, |thread, cx| {
            thread.build_completion_request(CompletionIntent::EditFile, cx)
        })
        .unwrap();
    // Compare everything after the first message: the pending permission tool
    // use must be absent; the completed echo tool use and its result present.
    assert_eq!(
        request.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Hey!".into()],
                cache: true,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![
                    MessageContent::Text("Hi!".into()),
                    MessageContent::ToolUse(echo_tool_use.clone())
                ],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::ToolResult(LanguageModelToolResult {
                    tool_use_id: echo_tool_use.id.clone(),
                    tool_name: echo_tool_use.name,
                    is_error: false,
                    content: "test".into(),
                    output: Some("test".into())
                })],
                cache: false,
                reasoning_details: None,
            },
        ],
    );
}
3242
// End-to-end exercise of `NativeAgentConnection`: session creation, model
// listing/selection, prompting, cancellation, and session teardown.
#[gpui::test]
async fn test_agent_connection(cx: &mut TestAppContext) {
    cx.update(settings::init);
    let templates = Templates::new();

    // Initialize language model system with test provider
    cx.update(|cx| {
        gpui_tokio::init(cx);

        let http_client = FakeHttpClient::with_404_response();
        let clock = Arc::new(clock::FakeSystemClock::new());
        let client = Client::new(clock, http_client, cx);
        let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
        language_model::init(cx);
        RefreshLlmTokenListener::register(client.clone(), user_store.clone(), cx);
        language_models::init(user_store, client.clone(), cx);
        LanguageModelRegistry::test(cx);
    });
    cx.executor().forbid_parking();

    // Create a project for new_thread
    let fake_fs = cx.update(|cx| fs::FakeFs::new(cx.background_executor().clone()));
    fake_fs.insert_tree(path!("/test"), json!({})).await;
    let project = Project::test(fake_fs.clone(), [Path::new("/test")], cx).await;
    let cwd = PathList::new(&[Path::new("/test")]);
    let thread_store = cx.new(|cx| ThreadStore::new(cx));

    // Create agent and connection
    let agent = cx
        .update(|cx| NativeAgent::new(thread_store, templates.clone(), None, fake_fs.clone(), cx));
    let connection = NativeAgentConnection(agent.clone());

    // Create a thread using new_thread
    let connection_rc = Rc::new(connection.clone());
    let acp_thread = cx
        .update(|cx| connection_rc.new_session(project, cwd, cx))
        .await
        .expect("new_thread should succeed");

    // Get the session_id from the AcpThread
    let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());

    // Test model_selector returns Some
    let selector_opt = connection.model_selector(&session_id);
    assert!(
        selector_opt.is_some(),
        "agent should always support ModelSelector"
    );
    let selector = selector_opt.unwrap();

    // Test list_models: the test registry groups the fake model under "Fake".
    let listed_models = cx
        .update(|cx| selector.list_models(cx))
        .await
        .expect("list_models should succeed");
    let AgentModelList::Grouped(listed_models) = listed_models else {
        panic!("Unexpected model list type");
    };
    assert!(!listed_models.is_empty(), "should have at least one model");
    assert_eq!(
        listed_models[&AgentModelGroupName("Fake".into())][0]
            .id
            .0
            .as_ref(),
        "fake/fake"
    );

    // Test selected_model returns the default
    let model = cx
        .update(|cx| selector.selected_model(cx))
        .await
        .expect("selected_model should succeed");
    let model = cx
        .update(|cx| agent.read(cx).models().model_from_id(&model.id))
        .unwrap();
    let model = model.as_fake();
    assert_eq!(model.id().0, "fake", "should return default model");

    // Prompt through the ACP thread and stream a response from the fake model.
    let request = acp_thread.update(cx, |thread, cx| thread.send(vec!["abc".into()], cx));
    cx.run_until_parked();
    model.send_last_completion_stream_text_chunk("def");
    cx.run_until_parked();
    acp_thread.read_with(cx, |thread, cx| {
        assert_eq!(
            thread.to_markdown(cx),
            indoc! {"
                ## User

                abc

                ## Assistant

                def

            "}
        )
    });

    // Test cancel
    cx.update(|cx| connection.cancel(&session_id, cx));
    request.await.expect("prompt should fail gracefully");

    // Explicitly close the session and drop the ACP thread.
    cx.update(|cx| Rc::new(connection.clone()).close_session(&session_id, cx))
        .await
        .unwrap();
    drop(acp_thread);
    // Prompting a closed session must fail with "Session not found".
    let result = cx
        .update(|cx| {
            connection.prompt(
                Some(acp_thread::UserMessageId::new()),
                acp::PromptRequest::new(session_id.clone(), vec!["ghi".into()]),
                cx,
            )
        })
        .await;
    assert_eq!(
        result.as_ref().unwrap_err().to_string(),
        "Session not found",
        "unexpected result: {:?}",
        result
    );
}
3366
// Verifies the sequence of ACP events emitted while a tool's input streams in
// and the tool then runs to completion: initial ToolCall (partial input),
// an update with the final raw input, InProgress, and finally Completed with
// the tool's output.
#[gpui::test]
async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    thread.update(cx, |thread, _cx| thread.add_tool(EchoTool));
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Echo something"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Simulate streaming partial input.
    let input = json!({});
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "1".into(),
            name: EchoTool::NAME.into(),
            raw_input: input.to_string(),
            input,
            is_input_complete: false,
            thought_signature: None,
        },
    ));

    // Input streaming completed
    let input = json!({ "text": "Hello!" });
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "1".into(),
            name: "echo".into(),
            raw_input: input.to_string(),
            input,
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    // The initial tool call carries the partial raw input and the tool name
    // in its metadata.
    let tool_call = expect_tool_call(&mut events).await;
    assert_eq!(
        tool_call,
        acp::ToolCall::new("1", "Echo")
            .raw_input(json!({}))
            .meta(acp::Meta::from_iter([("tool_name".into(), "echo".into())]))
    );
    // Once input completes, an update delivers the final raw input.
    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "1",
            acp::ToolCallUpdateFields::new()
                .title("Echo")
                .kind(acp::ToolKind::Other)
                .raw_input(json!({ "text": "Hello!"}))
        )
    );
    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "1",
            acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress)
        )
    );
    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "1",
            acp::ToolCallUpdateFields::new()
                .status(acp::ToolCallStatus::Completed)
                .raw_output("Hello!")
        )
    );
}
3445
// Verifies that running the UpdatePlanTool emits a ToolCall (kind Think), an
// InProgress update, a Plan event mirroring the tool's input, and a Completed
// update with the "Plan updated" output.
#[gpui::test]
async fn test_update_plan_tool_updates_thread_events(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    thread.update(cx, |thread, _cx| thread.add_tool(UpdatePlanTool));
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Make a plan"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // A three-step plan covering each entry status.
    let input = json!({
        "plan": [
            {
                "step": "Inspect the code",
                "status": "completed",
            },
            {
                "step": "Implement the tool",
                "status": "in_progress"
            },
            {
                "step": "Run tests",
                "status": "pending",
            }
        ]
    });
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "plan_1".into(),
            name: UpdatePlanTool::NAME.into(),
            raw_input: input.to_string(),
            input,
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    let tool_call = expect_tool_call(&mut events).await;
    assert_eq!(
        tool_call,
        acp::ToolCall::new("plan_1", "Update plan")
            .kind(acp::ToolKind::Think)
            .raw_input(json!({
                "plan": [
                    {
                        "step": "Inspect the code",
                        "status": "completed",
                    },
                    {
                        "step": "Implement the tool",
                        "status": "in_progress"
                    },
                    {
                        "step": "Run tests",
                        "status": "pending",
                    }
                ]
            }))
            .meta(acp::Meta::from_iter([(
                "tool_name".into(),
                "update_plan".into()
            )]))
    );

    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "plan_1",
            acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress)
        )
    );

    // The plan event mirrors the input; every entry gets Medium priority.
    let plan = expect_plan(&mut events).await;
    assert_eq!(
        plan,
        acp::Plan::new(vec![
            acp::PlanEntry::new(
                "Inspect the code",
                acp::PlanEntryPriority::Medium,
                acp::PlanEntryStatus::Completed,
            ),
            acp::PlanEntry::new(
                "Implement the tool",
                acp::PlanEntryPriority::Medium,
                acp::PlanEntryStatus::InProgress,
            ),
            acp::PlanEntry::new(
                "Run tests",
                acp::PlanEntryPriority::Medium,
                acp::PlanEntryStatus::Pending,
            ),
        ])
    );

    let update = expect_tool_call_update_fields(&mut events).await;
    assert_eq!(
        update,
        acp::ToolCallUpdate::new(
            "plan_1",
            acp::ToolCallUpdateFields::new()
                .status(acp::ToolCallStatus::Completed)
                .raw_output("Plan updated")
        )
    );
}
3557
3558#[gpui::test]
3559async fn test_send_no_retry_on_success(cx: &mut TestAppContext) {
3560 let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
3561 let fake_model = model.as_fake();
3562
3563 let mut events = thread
3564 .update(cx, |thread, cx| {
3565 thread.send(UserMessageId::new(), ["Hello!"], cx)
3566 })
3567 .unwrap();
3568 cx.run_until_parked();
3569
3570 fake_model.send_last_completion_stream_text_chunk("Hey!");
3571 fake_model.end_last_completion_stream();
3572
3573 let mut retry_events = Vec::new();
3574 while let Some(Ok(event)) = events.next().await {
3575 match event {
3576 ThreadEvent::Retry(retry_status) => {
3577 retry_events.push(retry_status);
3578 }
3579 ThreadEvent::Stop(..) => break,
3580 _ => {}
3581 }
3582 }
3583
3584 assert_eq!(retry_events.len(), 0);
3585 thread.read_with(cx, |thread, _cx| {
3586 assert_eq!(
3587 thread.to_markdown(),
3588 indoc! {"
3589 ## User
3590
3591 Hello!
3592
3593 ## Assistant
3594
3595 Hey!
3596 "}
3597 )
3598 });
3599}
3600
// A ServerOverloaded error mid-stream should schedule exactly one retry after
// the provider's retry_after delay, and the resumed turn should be rendered
// as a separate assistant message preceded by a [resume] marker.
#[gpui::test]
async fn test_send_retry_on_error(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Stream a partial response, then fail with an overloaded error that
    // carries an explicit retry_after.
    fake_model.send_last_completion_stream_text_chunk("Hey,");
    fake_model.send_last_completion_stream_error(LanguageModelCompletionError::ServerOverloaded {
        provider: LanguageModelProviderName::new("Anthropic"),
        retry_after: Some(Duration::from_secs(3)),
    });
    fake_model.end_last_completion_stream();

    // Advancing the clock past retry_after fires the scheduled retry.
    cx.executor().advance_clock(Duration::from_secs(3));
    cx.run_until_parked();

    // The retried request completes successfully.
    fake_model.send_last_completion_stream_text_chunk("there!");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    let mut retry_events = Vec::new();
    while let Some(Ok(event)) = events.next().await {
        match event {
            ThreadEvent::Retry(retry_status) => {
                retry_events.push(retry_status);
            }
            ThreadEvent::Stop(..) => break,
            _ => {}
        }
    }

    assert_eq!(retry_events.len(), 1);
    assert!(matches!(
        retry_events[0],
        acp_thread::RetryStatus { attempt: 1, .. }
    ));
    thread.read_with(cx, |thread, _cx| {
        assert_eq!(
            thread.to_markdown(),
            indoc! {"
                ## User

                Hello!

                ## Assistant

                Hey,

                [resume]

                ## Assistant

                there!
            "}
        )
    });
}
3664
// A stream error mid-turn must not orphan an in-flight tool call: the tool
// still runs to completion, and the retried request includes both the tool
// use and its result.
#[gpui::test]
async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let events = thread
        .update(cx, |thread, cx| {
            thread.add_tool(EchoTool);
            thread.send(UserMessageId::new(), ["Call the echo tool!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Emit a complete tool use, then fail the stream before the turn ends.
    let tool_use_1 = LanguageModelToolUse {
        id: "tool_1".into(),
        name: EchoTool::NAME.into(),
        raw_input: json!({"text": "test"}).to_string(),
        input: json!({"text": "test"}),
        is_input_complete: true,
        thought_signature: None,
    };
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        tool_use_1.clone(),
    ));
    fake_model.send_last_completion_stream_error(LanguageModelCompletionError::ServerOverloaded {
        provider: LanguageModelProviderName::new("Anthropic"),
        retry_after: Some(Duration::from_secs(3)),
    });
    fake_model.end_last_completion_stream();

    // After retry_after elapses, the retry request must contain the tool use
    // and its completed result (the final message marked cacheable).
    cx.executor().advance_clock(Duration::from_secs(3));
    let completion = fake_model.pending_completions().pop().unwrap();
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Call the echo tool!".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![language_model::MessageContent::ToolUse(tool_use_1.clone())],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![language_model::MessageContent::ToolResult(
                    LanguageModelToolResult {
                        tool_use_id: tool_use_1.id.clone(),
                        tool_name: tool_use_1.name.clone(),
                        is_error: false,
                        content: "test".into(),
                        output: Some("test".into())
                    }
                )],
                cache: true,
                reasoning_details: None,
            },
        ]
    );

    // Let the retried round finish and confirm the final agent message.
    fake_model.send_last_completion_stream_text_chunk("Done");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();
    events.collect::<Vec<_>>().await;
    thread.read_with(cx, |thread, _cx| {
        assert_eq!(
            thread.last_received_or_pending_message(),
            Some(Message::Agent(AgentMessage {
                content: vec![AgentMessageContent::Text("Done".into())],
                tool_results: IndexMap::default(),
                reasoning_details: None,
            }))
        );
    })
}
3744
// After MAX_RETRY_ATTEMPTS failed retries, the next failure must surface as a
// terminal error on the event stream instead of scheduling another retry.
#[gpui::test]
async fn test_send_max_retries_exceeded(cx: &mut TestAppContext) {
    let ThreadTest { thread, model, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Hello!"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Fail the initial attempt plus every allowed retry.
    for _ in 0..crate::thread::MAX_RETRY_ATTEMPTS + 1 {
        fake_model.send_last_completion_stream_error(
            LanguageModelCompletionError::ServerOverloaded {
                provider: LanguageModelProviderName::new("Anthropic"),
                retry_after: Some(Duration::from_secs(3)),
            },
        );
        fake_model.end_last_completion_stream();
        cx.executor().advance_clock(Duration::from_secs(3));
        cx.run_until_parked();
    }

    let mut errors = Vec::new();
    let mut retry_events = Vec::new();
    while let Some(event) = events.next().await {
        match event {
            Ok(ThreadEvent::Retry(retry_status)) => {
                retry_events.push(retry_status);
            }
            Ok(ThreadEvent::Stop(..)) => break,
            Err(error) => errors.push(error),
            _ => {}
        }
    }

    // Exactly MAX_RETRY_ATTEMPTS retries, numbered 1..=MAX_RETRY_ATTEMPTS.
    assert_eq!(
        retry_events.len(),
        crate::thread::MAX_RETRY_ATTEMPTS as usize
    );
    for i in 0..crate::thread::MAX_RETRY_ATTEMPTS as usize {
        assert_eq!(retry_events[i].attempt, i + 1);
    }
    // The final failure is reported as a single ServerOverloaded error.
    assert_eq!(errors.len(), 1);
    let error = errors[0]
        .downcast_ref::<LanguageModelCompletionError>()
        .unwrap();
    assert!(matches!(
        error,
        LanguageModelCompletionError::ServerOverloaded { .. }
    ));
}
3798
// Regression test: a streaming tool whose input never finishes (the LLM
// stream errors out before sending is_input_complete = true) must terminate
// with an error result instead of deadlocking the turn.
#[gpui::test]
async fn test_streaming_tool_completes_when_llm_stream_ends_without_final_input(
    cx: &mut TestAppContext,
) {
    init_test(cx);
    // Auto-approve permission prompts so the tool starts immediately.
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    thread.update(cx, |thread, _cx| {
        thread.add_tool(StreamingEchoTool::new());
    });

    let _events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Use the streaming_echo tool"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Send a partial tool use (is_input_complete = false), simulating the LLM
    // streaming input for a tool.
    let tool_use = LanguageModelToolUse {
        id: "tool_1".into(),
        name: "streaming_echo".into(),
        raw_input: r#"{"text": "partial"}"#.into(),
        input: json!({"text": "partial"}),
        is_input_complete: false,
        thought_signature: None,
    };
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone()));
    cx.run_until_parked();

    // Send a stream error WITHOUT ever sending is_input_complete = true.
    // Before the fix, this would deadlock: the tool waits for more partials
    // (or cancellation), run_turn_internal waits for the tool, and the sender
    // keeping the channel open lives inside RunningTurn.
    fake_model.send_last_completion_stream_error(
        LanguageModelCompletionError::UpstreamProviderError {
            message: "Internal server error".to_string(),
            status: http_client::StatusCode::INTERNAL_SERVER_ERROR,
            retry_after: None,
        },
    );
    fake_model.end_last_completion_stream();

    // Advance past the retry delay so run_turn_internal retries.
    cx.executor().advance_clock(Duration::from_secs(5));
    cx.run_until_parked();

    // The retry request should contain the streaming tool's error result,
    // proving the tool terminated and its result was forwarded.
    let completion = fake_model
        .pending_completions()
        .pop()
        .expect("No running turn");
    assert_eq!(
        completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Use the streaming_echo tool".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![language_model::MessageContent::ToolUse(tool_use.clone())],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![language_model::MessageContent::ToolResult(
                    LanguageModelToolResult {
                        tool_use_id: tool_use.id.clone(),
                        tool_name: tool_use.name,
                        is_error: true,
                        content: "Failed to receive tool input: tool input was not fully received"
                            .into(),
                        output: Some(
                            "Failed to receive tool input: tool input was not fully received"
                                .into()
                        ),
                    }
                )],
                cache: true,
                reasoning_details: None,
            },
        ]
    );

    // Finish the retry round so the turn completes cleanly.
    fake_model.send_last_completion_stream_text_chunk("Done");
    fake_model.end_last_completion_stream();
    cx.run_until_parked();

    thread.read_with(cx, |thread, _cx| {
        assert!(
            thread.is_turn_complete(),
            "Thread should not be stuck; the turn should have completed",
        );
    });
}
3905
3906/// Filters out the stop events for asserting against in tests
3907fn stop_events(result_events: Vec<Result<ThreadEvent>>) -> Vec<acp::StopReason> {
3908 result_events
3909 .into_iter()
3910 .filter_map(|event| match event.unwrap() {
3911 ThreadEvent::Stop(stop_reason) => Some(stop_reason),
3912 _ => None,
3913 })
3914 .collect()
3915}
3916
/// Bundle of entities produced by [`setup`] for driving a `Thread` in tests.
struct ThreadTest {
    /// The model the thread was created with; call `as_fake()` in fake-model
    /// tests to drive completions manually.
    model: Arc<dyn LanguageModel>,
    thread: Entity<Thread>,
    project_context: Entity<ProjectContext>,
    context_server_store: Entity<ContextServerStore>,
    fs: Arc<FakeFs>,
}
3924
/// Which language model backs a test thread (see [`setup`]).
enum TestModel {
    /// A real Anthropic model resolved from the registry; `setup` creates a
    /// production client and authenticates the provider for this variant.
    Sonnet4,
    /// An in-memory `FakeLanguageModel` for deterministic tests.
    Fake,
}
3929
3930impl TestModel {
3931 fn id(&self) -> LanguageModelId {
3932 match self {
3933 TestModel::Sonnet4 => LanguageModelId("claude-sonnet-4-latest".into()),
3934 TestModel::Fake => unreachable!(),
3935 }
3936 }
3937}
3938
/// Builds a [`ThreadTest`] fixture: a `Thread` in a fresh fake project, with
/// a settings file that enables every test tool under a default
/// "test-profile" agent profile.
///
/// With [`TestModel::Fake`] the thread is backed by an in-memory
/// `FakeLanguageModel`; with [`TestModel::Sonnet4`] the model is resolved
/// from the registry and its provider authenticated first.
async fn setup(cx: &mut TestAppContext, model: TestModel) -> ThreadTest {
    // Real-model runs perform actual network I/O, so parking must be allowed.
    cx.executor().allow_parking();

    let fs = FakeFs::new(cx.background_executor.clone());
    fs.create_dir(paths::settings_file().parent().unwrap())
        .await
        .unwrap();
    // Write a settings file that turns on all test tools for the profile.
    fs.insert_file(
        paths::settings_file(),
        json!({
            "agent": {
                "default_profile": "test-profile",
                "profiles": {
                    "test-profile": {
                        "name": "Test Profile",
                        "tools": {
                            EchoTool::NAME: true,
                            DelayTool::NAME: true,
                            WordListTool::NAME: true,
                            ToolRequiringPermission::NAME: true,
                            InfiniteTool::NAME: true,
                            CancellationAwareTool::NAME: true,
                            StreamingEchoTool::NAME: true,
                            StreamingFailingEchoTool::NAME: true,
                            TerminalTool::NAME: true,
                            UpdatePlanTool::NAME: true,
                        }
                    }
                }
            }
        })
        .to_string()
        .into_bytes(),
    )
    .await;

    cx.update(|cx| {
        settings::init(cx);

        // Only the real-model path needs an HTTP client and the language
        // model providers wired up.
        match model {
            TestModel::Fake => {}
            TestModel::Sonnet4 => {
                gpui_tokio::init(cx);
                let http_client = ReqwestClient::user_agent("agent tests").unwrap();
                cx.set_http_client(Arc::new(http_client));
                let client = Client::production(cx);
                let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
                language_model::init(cx);
                RefreshLlmTokenListener::register(client.clone(), user_store.clone(), cx);
                language_models::init(user_store, client.clone(), cx);
            }
        };

        // Keep global settings in sync with the fake settings file above.
        watch_settings(fs.clone(), cx);
    });

    let templates = Templates::new();

    fs.insert_tree(path!("/test"), json!({})).await;
    let project = Project::test(fs.clone(), [path!("/test").as_ref()], cx).await;

    // Resolve the requested model; real models must authenticate first.
    let model = cx
        .update(|cx| {
            if let TestModel::Fake = model {
                Task::ready(Arc::new(FakeLanguageModel::default()) as Arc<_>)
            } else {
                let model_id = model.id();
                let models = LanguageModelRegistry::read_global(cx);
                let model = models
                    .available_models(cx)
                    .find(|model| model.id() == model_id)
                    .unwrap();

                let provider = models.provider(&model.provider_id()).unwrap();
                let authenticated = provider.authenticate(cx);

                cx.spawn(async move |_cx| {
                    authenticated.await.unwrap();
                    model
                })
            }
        })
        .await;

    let project_context = cx.new(|_cx| ProjectContext::default());
    let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
    let context_server_registry =
        cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
    let thread = cx.new(|cx| {
        Thread::new(
            project,
            project_context.clone(),
            context_server_registry,
            templates,
            Some(model.clone()),
            cx,
        )
    });
    ThreadTest {
        model,
        thread,
        project_context,
        context_server_store,
        fs,
    }
}
4045
// Wire up env_logger once per test binary, but only when the caller opted in
// via the RUST_LOG environment variable.
#[cfg(test)]
#[ctor::ctor]
fn init_logger() {
    match std::env::var("RUST_LOG") {
        Ok(_) => env_logger::init(),
        Err(_) => {}
    }
}
4053
4054fn watch_settings(fs: Arc<dyn Fs>, cx: &mut App) {
4055 let fs = fs.clone();
4056 cx.spawn({
4057 async move |cx| {
4058 let (mut new_settings_content_rx, watcher_task) = settings::watch_config_file(
4059 cx.background_executor(),
4060 fs,
4061 paths::settings_file().clone(),
4062 );
4063 let _watcher_task = watcher_task;
4064
4065 while let Some(new_settings_content) = new_settings_content_rx.next().await {
4066 cx.update(|cx| {
4067 SettingsStore::update_global(cx, |settings, cx| {
4068 settings.set_user_settings(&new_settings_content, cx)
4069 })
4070 })
4071 .ok();
4072 }
4073 }
4074 })
4075 .detach();
4076}
4077
4078fn tool_names_for_completion(completion: &LanguageModelRequest) -> Vec<String> {
4079 completion
4080 .tools
4081 .iter()
4082 .map(|tool| tool.name.clone())
4083 .collect()
4084}
4085
/// Registers a fake stdio context server named `name` in the project settings
/// and starts it in `context_server_store`.
///
/// Returns a receiver that yields each incoming `CallTool` request paired
/// with a oneshot sender the test must use to supply the tool's response.
fn setup_context_server(
    name: &'static str,
    tools: Vec<context_server::types::Tool>,
    context_server_store: &Entity<ContextServerStore>,
    cx: &mut TestAppContext,
) -> mpsc::UnboundedReceiver<(
    context_server::types::CallToolParams,
    oneshot::Sender<context_server::types::CallToolResponse>,
)> {
    // Declare the server in project settings so the store knows about it.
    cx.update(|cx| {
        let mut settings = ProjectSettings::get_global(cx).clone();
        settings.context_servers.insert(
            name.into(),
            project::project_settings::ContextServerSettings::Stdio {
                enabled: true,
                remote: false,
                command: ContextServerCommand {
                    path: "somebinary".into(),
                    args: Vec::new(),
                    env: None,
                    timeout: None,
                },
            },
        );
        ProjectSettings::override_global(settings, cx);
    });

    // Fake transport: answers the MCP initialize handshake and tool listing,
    // and forwards every CallTool request to the test via the channel.
    let (mcp_tool_calls_tx, mcp_tool_calls_rx) = mpsc::unbounded();
    let fake_transport = context_server::test::create_fake_transport(name, cx.executor())
        .on_request::<context_server::types::requests::Initialize, _>(move |_params| async move {
            context_server::types::InitializeResponse {
                protocol_version: context_server::types::ProtocolVersion(
                    context_server::types::LATEST_PROTOCOL_VERSION.to_string(),
                ),
                server_info: context_server::types::Implementation {
                    name: name.into(),
                    version: "1.0.0".to_string(),
                },
                capabilities: context_server::types::ServerCapabilities {
                    tools: Some(context_server::types::ToolsCapabilities {
                        list_changed: Some(true),
                    }),
                    ..Default::default()
                },
                meta: None,
            }
        })
        .on_request::<context_server::types::requests::ListTools, _>(move |_params| {
            let tools = tools.clone();
            async move {
                context_server::types::ListToolsResponse {
                    tools,
                    next_cursor: None,
                    meta: None,
                }
            }
        })
        .on_request::<context_server::types::requests::CallTool, _>(move |params| {
            let mcp_tool_calls_tx = mcp_tool_calls_tx.clone();
            async move {
                // Hand the request to the test and block until it responds.
                let (response_tx, response_rx) = oneshot::channel();
                mcp_tool_calls_tx
                    .unbounded_send((params, response_tx))
                    .unwrap();
                response_rx.await.unwrap()
            }
        });
    context_server_store.update(cx, |store, cx| {
        store.start_server(
            Arc::new(ContextServer::new(
                ContextServerId(name.into()),
                Arc::new(fake_transport),
            )),
            cx,
        );
    });
    // Let the server finish starting before the test proceeds.
    cx.run_until_parked();
    mcp_tool_calls_rx
}
4165
4166#[gpui::test]
4167async fn test_tokens_before_message(cx: &mut TestAppContext) {
4168 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
4169 let fake_model = model.as_fake();
4170
4171 // First message
4172 let message_1_id = UserMessageId::new();
4173 thread
4174 .update(cx, |thread, cx| {
4175 thread.send(message_1_id.clone(), ["First message"], cx)
4176 })
4177 .unwrap();
4178 cx.run_until_parked();
4179
4180 // Before any response, tokens_before_message should return None for first message
4181 thread.read_with(cx, |thread, _| {
4182 assert_eq!(
4183 thread.tokens_before_message(&message_1_id),
4184 None,
4185 "First message should have no tokens before it"
4186 );
4187 });
4188
4189 // Complete first message with usage
4190 fake_model.send_last_completion_stream_text_chunk("Response 1");
4191 fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
4192 language_model::TokenUsage {
4193 input_tokens: 100,
4194 output_tokens: 50,
4195 cache_creation_input_tokens: 0,
4196 cache_read_input_tokens: 0,
4197 },
4198 ));
4199 fake_model.end_last_completion_stream();
4200 cx.run_until_parked();
4201
4202 // First message still has no tokens before it
4203 thread.read_with(cx, |thread, _| {
4204 assert_eq!(
4205 thread.tokens_before_message(&message_1_id),
4206 None,
4207 "First message should still have no tokens before it after response"
4208 );
4209 });
4210
4211 // Second message
4212 let message_2_id = UserMessageId::new();
4213 thread
4214 .update(cx, |thread, cx| {
4215 thread.send(message_2_id.clone(), ["Second message"], cx)
4216 })
4217 .unwrap();
4218 cx.run_until_parked();
4219
4220 // Second message should have first message's input tokens before it
4221 thread.read_with(cx, |thread, _| {
4222 assert_eq!(
4223 thread.tokens_before_message(&message_2_id),
4224 Some(100),
4225 "Second message should have 100 tokens before it (from first request)"
4226 );
4227 });
4228
4229 // Complete second message
4230 fake_model.send_last_completion_stream_text_chunk("Response 2");
4231 fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
4232 language_model::TokenUsage {
4233 input_tokens: 250, // Total for this request (includes previous context)
4234 output_tokens: 75,
4235 cache_creation_input_tokens: 0,
4236 cache_read_input_tokens: 0,
4237 },
4238 ));
4239 fake_model.end_last_completion_stream();
4240 cx.run_until_parked();
4241
4242 // Third message
4243 let message_3_id = UserMessageId::new();
4244 thread
4245 .update(cx, |thread, cx| {
4246 thread.send(message_3_id.clone(), ["Third message"], cx)
4247 })
4248 .unwrap();
4249 cx.run_until_parked();
4250
4251 // Third message should have second message's input tokens (250) before it
4252 thread.read_with(cx, |thread, _| {
4253 assert_eq!(
4254 thread.tokens_before_message(&message_3_id),
4255 Some(250),
4256 "Third message should have 250 tokens before it (from second request)"
4257 );
4258 // Second message should still have 100
4259 assert_eq!(
4260 thread.tokens_before_message(&message_2_id),
4261 Some(100),
4262 "Second message should still have 100 tokens before it"
4263 );
4264 // First message still has none
4265 assert_eq!(
4266 thread.tokens_before_message(&message_1_id),
4267 None,
4268 "First message should still have no tokens before it"
4269 );
4270 });
4271}
4272
4273#[gpui::test]
4274async fn test_tokens_before_message_after_truncate(cx: &mut TestAppContext) {
4275 let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
4276 let fake_model = model.as_fake();
4277
4278 // Set up three messages with responses
4279 let message_1_id = UserMessageId::new();
4280 thread
4281 .update(cx, |thread, cx| {
4282 thread.send(message_1_id.clone(), ["Message 1"], cx)
4283 })
4284 .unwrap();
4285 cx.run_until_parked();
4286 fake_model.send_last_completion_stream_text_chunk("Response 1");
4287 fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
4288 language_model::TokenUsage {
4289 input_tokens: 100,
4290 output_tokens: 50,
4291 cache_creation_input_tokens: 0,
4292 cache_read_input_tokens: 0,
4293 },
4294 ));
4295 fake_model.end_last_completion_stream();
4296 cx.run_until_parked();
4297
4298 let message_2_id = UserMessageId::new();
4299 thread
4300 .update(cx, |thread, cx| {
4301 thread.send(message_2_id.clone(), ["Message 2"], cx)
4302 })
4303 .unwrap();
4304 cx.run_until_parked();
4305 fake_model.send_last_completion_stream_text_chunk("Response 2");
4306 fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
4307 language_model::TokenUsage {
4308 input_tokens: 250,
4309 output_tokens: 75,
4310 cache_creation_input_tokens: 0,
4311 cache_read_input_tokens: 0,
4312 },
4313 ));
4314 fake_model.end_last_completion_stream();
4315 cx.run_until_parked();
4316
4317 // Verify initial state
4318 thread.read_with(cx, |thread, _| {
4319 assert_eq!(thread.tokens_before_message(&message_2_id), Some(100));
4320 });
4321
4322 // Truncate at message 2 (removes message 2 and everything after)
4323 thread
4324 .update(cx, |thread, cx| thread.truncate(message_2_id.clone(), cx))
4325 .unwrap();
4326 cx.run_until_parked();
4327
4328 // After truncation, message_2_id no longer exists, so lookup should return None
4329 thread.read_with(cx, |thread, _| {
4330 assert_eq!(
4331 thread.tokens_before_message(&message_2_id),
4332 None,
4333 "After truncation, message 2 no longer exists"
4334 );
4335 // Message 1 still exists but has no tokens before it
4336 assert_eq!(
4337 thread.tokens_before_message(&message_1_id),
4338 None,
4339 "First message still has no tokens before it"
4340 );
4341 });
4342}
4343
4344#[gpui::test]
4345async fn test_terminal_tool_permission_rules(cx: &mut TestAppContext) {
4346 init_test(cx);
4347
4348 let fs = FakeFs::new(cx.executor());
4349 fs.insert_tree("/root", json!({})).await;
4350 let project = Project::test(fs, ["/root".as_ref()], cx).await;
4351
4352 // Test 1: Deny rule blocks command
4353 {
4354 let environment = Rc::new(cx.update(|cx| {
4355 FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
4356 }));
4357
4358 cx.update(|cx| {
4359 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
4360 settings.tool_permissions.tools.insert(
4361 TerminalTool::NAME.into(),
4362 agent_settings::ToolRules {
4363 default: Some(settings::ToolPermissionMode::Confirm),
4364 always_allow: vec![],
4365 always_deny: vec![
4366 agent_settings::CompiledRegex::new(r"rm\s+-rf", false).unwrap(),
4367 ],
4368 always_confirm: vec![],
4369 invalid_patterns: vec![],
4370 },
4371 );
4372 agent_settings::AgentSettings::override_global(settings, cx);
4373 });
4374
4375 #[allow(clippy::arc_with_non_send_sync)]
4376 let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
4377 let (event_stream, _rx) = crate::ToolCallEventStream::test();
4378
4379 let task = cx.update(|cx| {
4380 tool.run(
4381 ToolInput::resolved(crate::TerminalToolInput {
4382 command: "rm -rf /".to_string(),
4383 cd: ".".to_string(),
4384 timeout_ms: None,
4385 }),
4386 event_stream,
4387 cx,
4388 )
4389 });
4390
4391 let result = task.await;
4392 assert!(
4393 result.is_err(),
4394 "expected command to be blocked by deny rule"
4395 );
4396 let err_msg = result.unwrap_err().to_lowercase();
4397 assert!(
4398 err_msg.contains("blocked"),
4399 "error should mention the command was blocked"
4400 );
4401 }
4402
4403 // Test 2: Allow rule skips confirmation (and overrides default: Deny)
4404 {
4405 let environment = Rc::new(cx.update(|cx| {
4406 FakeThreadEnvironment::default()
4407 .with_terminal(FakeTerminalHandle::new_with_immediate_exit(cx, 0))
4408 }));
4409
4410 cx.update(|cx| {
4411 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
4412 settings.tool_permissions.tools.insert(
4413 TerminalTool::NAME.into(),
4414 agent_settings::ToolRules {
4415 default: Some(settings::ToolPermissionMode::Deny),
4416 always_allow: vec![
4417 agent_settings::CompiledRegex::new(r"^echo\s", false).unwrap(),
4418 ],
4419 always_deny: vec![],
4420 always_confirm: vec![],
4421 invalid_patterns: vec![],
4422 },
4423 );
4424 agent_settings::AgentSettings::override_global(settings, cx);
4425 });
4426
4427 #[allow(clippy::arc_with_non_send_sync)]
4428 let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
4429 let (event_stream, mut rx) = crate::ToolCallEventStream::test();
4430
4431 let task = cx.update(|cx| {
4432 tool.run(
4433 ToolInput::resolved(crate::TerminalToolInput {
4434 command: "echo hello".to_string(),
4435 cd: ".".to_string(),
4436 timeout_ms: None,
4437 }),
4438 event_stream,
4439 cx,
4440 )
4441 });
4442
4443 let update = rx.expect_update_fields().await;
4444 assert!(
4445 update.content.iter().any(|blocks| {
4446 blocks
4447 .iter()
4448 .any(|c| matches!(c, acp::ToolCallContent::Terminal(_)))
4449 }),
4450 "expected terminal content (allow rule should skip confirmation and override default deny)"
4451 );
4452
4453 let result = task.await;
4454 assert!(
4455 result.is_ok(),
4456 "expected command to succeed without confirmation"
4457 );
4458 }
4459
4460 // Test 3: global default: allow does NOT override always_confirm patterns
4461 {
4462 let environment = Rc::new(cx.update(|cx| {
4463 FakeThreadEnvironment::default()
4464 .with_terminal(FakeTerminalHandle::new_with_immediate_exit(cx, 0))
4465 }));
4466
4467 cx.update(|cx| {
4468 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
4469 settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
4470 settings.tool_permissions.tools.insert(
4471 TerminalTool::NAME.into(),
4472 agent_settings::ToolRules {
4473 default: Some(settings::ToolPermissionMode::Allow),
4474 always_allow: vec![],
4475 always_deny: vec![],
4476 always_confirm: vec![
4477 agent_settings::CompiledRegex::new(r"sudo", false).unwrap(),
4478 ],
4479 invalid_patterns: vec![],
4480 },
4481 );
4482 agent_settings::AgentSettings::override_global(settings, cx);
4483 });
4484
4485 #[allow(clippy::arc_with_non_send_sync)]
4486 let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
4487 let (event_stream, mut rx) = crate::ToolCallEventStream::test();
4488
4489 let _task = cx.update(|cx| {
4490 tool.run(
4491 ToolInput::resolved(crate::TerminalToolInput {
4492 command: "sudo rm file".to_string(),
4493 cd: ".".to_string(),
4494 timeout_ms: None,
4495 }),
4496 event_stream,
4497 cx,
4498 )
4499 });
4500
4501 // With global default: allow, confirm patterns are still respected
4502 // The expect_authorization() call will panic if no authorization is requested,
4503 // which validates that the confirm pattern still triggers confirmation
4504 let _auth = rx.expect_authorization().await;
4505
4506 drop(_task);
4507 }
4508
4509 // Test 4: tool-specific default: deny is respected even with global default: allow
4510 {
4511 let environment = Rc::new(cx.update(|cx| {
4512 FakeThreadEnvironment::default()
4513 .with_terminal(FakeTerminalHandle::new_with_immediate_exit(cx, 0))
4514 }));
4515
4516 cx.update(|cx| {
4517 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
4518 settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
4519 settings.tool_permissions.tools.insert(
4520 TerminalTool::NAME.into(),
4521 agent_settings::ToolRules {
4522 default: Some(settings::ToolPermissionMode::Deny),
4523 always_allow: vec![],
4524 always_deny: vec![],
4525 always_confirm: vec![],
4526 invalid_patterns: vec![],
4527 },
4528 );
4529 agent_settings::AgentSettings::override_global(settings, cx);
4530 });
4531
4532 #[allow(clippy::arc_with_non_send_sync)]
4533 let tool = Arc::new(crate::TerminalTool::new(project.clone(), environment));
4534 let (event_stream, _rx) = crate::ToolCallEventStream::test();
4535
4536 let task = cx.update(|cx| {
4537 tool.run(
4538 ToolInput::resolved(crate::TerminalToolInput {
4539 command: "echo hello".to_string(),
4540 cd: ".".to_string(),
4541 timeout_ms: None,
4542 }),
4543 event_stream,
4544 cx,
4545 )
4546 });
4547
4548 // tool-specific default: deny is respected even with global default: allow
4549 let result = task.await;
4550 assert!(
4551 result.is_err(),
4552 "expected command to be blocked by tool-specific deny default"
4553 );
4554 let err_msg = result.unwrap_err().to_lowercase();
4555 assert!(
4556 err_msg.contains("disabled"),
4557 "error should mention the tool is disabled, got: {err_msg}"
4558 );
4559 }
4560}
4561
4562#[gpui::test]
4563async fn test_subagent_tool_call_end_to_end(cx: &mut TestAppContext) {
4564 init_test(cx);
4565 cx.update(|cx| {
4566 LanguageModelRegistry::test(cx);
4567 });
4568 cx.update(|cx| {
4569 cx.update_flags(true, vec!["subagents".to_string()]);
4570 });
4571
4572 let fs = FakeFs::new(cx.executor());
4573 fs.insert_tree(
4574 "/",
4575 json!({
4576 "a": {
4577 "b.md": "Lorem"
4578 }
4579 }),
4580 )
4581 .await;
4582 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
4583 let thread_store = cx.new(|cx| ThreadStore::new(cx));
4584 let agent = cx.update(|cx| {
4585 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
4586 });
4587 let connection = Rc::new(NativeAgentConnection(agent.clone()));
4588
4589 let acp_thread = cx
4590 .update(|cx| {
4591 connection
4592 .clone()
4593 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
4594 })
4595 .await
4596 .unwrap();
4597 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
4598 let thread = agent.read_with(cx, |agent, _| {
4599 agent.sessions.get(&session_id).unwrap().thread.clone()
4600 });
4601 let model = Arc::new(FakeLanguageModel::default());
4602
4603 // Ensure empty threads are not saved, even if they get mutated.
4604 thread.update(cx, |thread, cx| {
4605 thread.set_model(model.clone(), cx);
4606 });
4607 cx.run_until_parked();
4608
4609 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
4610 cx.run_until_parked();
4611 model.send_last_completion_stream_text_chunk("spawning subagent");
4612 let subagent_tool_input = SpawnAgentToolInput {
4613 label: "label".to_string(),
4614 message: "subagent task prompt".to_string(),
4615 session_id: None,
4616 };
4617 let subagent_tool_use = LanguageModelToolUse {
4618 id: "subagent_1".into(),
4619 name: SpawnAgentTool::NAME.into(),
4620 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
4621 input: serde_json::to_value(&subagent_tool_input).unwrap(),
4622 is_input_complete: true,
4623 thought_signature: None,
4624 };
4625 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
4626 subagent_tool_use,
4627 ));
4628 model.end_last_completion_stream();
4629
4630 cx.run_until_parked();
4631
4632 let subagent_session_id = thread.read_with(cx, |thread, cx| {
4633 thread
4634 .running_subagent_ids(cx)
4635 .get(0)
4636 .expect("subagent thread should be running")
4637 .clone()
4638 });
4639
4640 let subagent_thread = agent.read_with(cx, |agent, _cx| {
4641 agent
4642 .sessions
4643 .get(&subagent_session_id)
4644 .expect("subagent session should exist")
4645 .acp_thread
4646 .clone()
4647 });
4648
4649 model.send_last_completion_stream_text_chunk("subagent task response");
4650 model.end_last_completion_stream();
4651
4652 cx.run_until_parked();
4653
4654 assert_eq!(
4655 subagent_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
4656 indoc! {"
4657 ## User
4658
4659 subagent task prompt
4660
4661 ## Assistant
4662
4663 subagent task response
4664
4665 "}
4666 );
4667
4668 model.send_last_completion_stream_text_chunk("Response");
4669 model.end_last_completion_stream();
4670
4671 send.await.unwrap();
4672
4673 assert_eq!(
4674 acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
4675 indoc! {r#"
4676 ## User
4677
4678 Prompt
4679
4680 ## Assistant
4681
4682 spawning subagent
4683
4684 **Tool Call: label**
4685 Status: Completed
4686
4687 subagent task response
4688
4689 ## Assistant
4690
4691 Response
4692
4693 "#},
4694 );
4695}
4696
4697#[gpui::test]
4698async fn test_subagent_tool_output_does_not_include_thinking(cx: &mut TestAppContext) {
4699 init_test(cx);
4700 cx.update(|cx| {
4701 LanguageModelRegistry::test(cx);
4702 });
4703 cx.update(|cx| {
4704 cx.update_flags(true, vec!["subagents".to_string()]);
4705 });
4706
4707 let fs = FakeFs::new(cx.executor());
4708 fs.insert_tree(
4709 "/",
4710 json!({
4711 "a": {
4712 "b.md": "Lorem"
4713 }
4714 }),
4715 )
4716 .await;
4717 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
4718 let thread_store = cx.new(|cx| ThreadStore::new(cx));
4719 let agent = cx.update(|cx| {
4720 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
4721 });
4722 let connection = Rc::new(NativeAgentConnection(agent.clone()));
4723
4724 let acp_thread = cx
4725 .update(|cx| {
4726 connection
4727 .clone()
4728 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
4729 })
4730 .await
4731 .unwrap();
4732 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
4733 let thread = agent.read_with(cx, |agent, _| {
4734 agent.sessions.get(&session_id).unwrap().thread.clone()
4735 });
4736 let model = Arc::new(FakeLanguageModel::default());
4737
4738 // Ensure empty threads are not saved, even if they get mutated.
4739 thread.update(cx, |thread, cx| {
4740 thread.set_model(model.clone(), cx);
4741 });
4742 cx.run_until_parked();
4743
4744 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
4745 cx.run_until_parked();
4746 model.send_last_completion_stream_text_chunk("spawning subagent");
4747 let subagent_tool_input = SpawnAgentToolInput {
4748 label: "label".to_string(),
4749 message: "subagent task prompt".to_string(),
4750 session_id: None,
4751 };
4752 let subagent_tool_use = LanguageModelToolUse {
4753 id: "subagent_1".into(),
4754 name: SpawnAgentTool::NAME.into(),
4755 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
4756 input: serde_json::to_value(&subagent_tool_input).unwrap(),
4757 is_input_complete: true,
4758 thought_signature: None,
4759 };
4760 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
4761 subagent_tool_use,
4762 ));
4763 model.end_last_completion_stream();
4764
4765 cx.run_until_parked();
4766
4767 let subagent_session_id = thread.read_with(cx, |thread, cx| {
4768 thread
4769 .running_subagent_ids(cx)
4770 .get(0)
4771 .expect("subagent thread should be running")
4772 .clone()
4773 });
4774
4775 let subagent_thread = agent.read_with(cx, |agent, _cx| {
4776 agent
4777 .sessions
4778 .get(&subagent_session_id)
4779 .expect("subagent session should exist")
4780 .acp_thread
4781 .clone()
4782 });
4783
4784 model.send_last_completion_stream_text_chunk("subagent task response 1");
4785 model.send_last_completion_stream_event(LanguageModelCompletionEvent::Thinking {
4786 text: "thinking more about the subagent task".into(),
4787 signature: None,
4788 });
4789 model.send_last_completion_stream_text_chunk("subagent task response 2");
4790 model.end_last_completion_stream();
4791
4792 cx.run_until_parked();
4793
4794 assert_eq!(
4795 subagent_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
4796 indoc! {"
4797 ## User
4798
4799 subagent task prompt
4800
4801 ## Assistant
4802
4803 subagent task response 1
4804
4805 <thinking>
4806 thinking more about the subagent task
4807 </thinking>
4808
4809 subagent task response 2
4810
4811 "}
4812 );
4813
4814 model.send_last_completion_stream_text_chunk("Response");
4815 model.end_last_completion_stream();
4816
4817 send.await.unwrap();
4818
4819 assert_eq!(
4820 acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
4821 indoc! {r#"
4822 ## User
4823
4824 Prompt
4825
4826 ## Assistant
4827
4828 spawning subagent
4829
4830 **Tool Call: label**
4831 Status: Completed
4832
4833 subagent task response 1
4834
4835 subagent task response 2
4836
4837 ## Assistant
4838
4839 Response
4840
4841 "#},
4842 );
4843}
4844
4845#[gpui::test]
4846async fn test_subagent_tool_call_cancellation_during_task_prompt(cx: &mut TestAppContext) {
4847 init_test(cx);
4848 cx.update(|cx| {
4849 LanguageModelRegistry::test(cx);
4850 });
4851 cx.update(|cx| {
4852 cx.update_flags(true, vec!["subagents".to_string()]);
4853 });
4854
4855 let fs = FakeFs::new(cx.executor());
4856 fs.insert_tree(
4857 "/",
4858 json!({
4859 "a": {
4860 "b.md": "Lorem"
4861 }
4862 }),
4863 )
4864 .await;
4865 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
4866 let thread_store = cx.new(|cx| ThreadStore::new(cx));
4867 let agent = cx.update(|cx| {
4868 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
4869 });
4870 let connection = Rc::new(NativeAgentConnection(agent.clone()));
4871
4872 let acp_thread = cx
4873 .update(|cx| {
4874 connection
4875 .clone()
4876 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
4877 })
4878 .await
4879 .unwrap();
4880 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
4881 let thread = agent.read_with(cx, |agent, _| {
4882 agent.sessions.get(&session_id).unwrap().thread.clone()
4883 });
4884 let model = Arc::new(FakeLanguageModel::default());
4885
4886 // Ensure empty threads are not saved, even if they get mutated.
4887 thread.update(cx, |thread, cx| {
4888 thread.set_model(model.clone(), cx);
4889 });
4890 cx.run_until_parked();
4891
4892 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
4893 cx.run_until_parked();
4894 model.send_last_completion_stream_text_chunk("spawning subagent");
4895 let subagent_tool_input = SpawnAgentToolInput {
4896 label: "label".to_string(),
4897 message: "subagent task prompt".to_string(),
4898 session_id: None,
4899 };
4900 let subagent_tool_use = LanguageModelToolUse {
4901 id: "subagent_1".into(),
4902 name: SpawnAgentTool::NAME.into(),
4903 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
4904 input: serde_json::to_value(&subagent_tool_input).unwrap(),
4905 is_input_complete: true,
4906 thought_signature: None,
4907 };
4908 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
4909 subagent_tool_use,
4910 ));
4911 model.end_last_completion_stream();
4912
4913 cx.run_until_parked();
4914
4915 let subagent_session_id = thread.read_with(cx, |thread, cx| {
4916 thread
4917 .running_subagent_ids(cx)
4918 .get(0)
4919 .expect("subagent thread should be running")
4920 .clone()
4921 });
4922 let subagent_acp_thread = agent.read_with(cx, |agent, _cx| {
4923 agent
4924 .sessions
4925 .get(&subagent_session_id)
4926 .expect("subagent session should exist")
4927 .acp_thread
4928 .clone()
4929 });
4930
4931 // model.send_last_completion_stream_text_chunk("subagent task response");
4932 // model.end_last_completion_stream();
4933
4934 // cx.run_until_parked();
4935
4936 acp_thread.update(cx, |thread, cx| thread.cancel(cx)).await;
4937
4938 cx.run_until_parked();
4939
4940 send.await.unwrap();
4941
4942 acp_thread.read_with(cx, |thread, cx| {
4943 assert_eq!(thread.status(), ThreadStatus::Idle);
4944 assert_eq!(
4945 thread.to_markdown(cx),
4946 indoc! {"
4947 ## User
4948
4949 Prompt
4950
4951 ## Assistant
4952
4953 spawning subagent
4954
4955 **Tool Call: label**
4956 Status: Canceled
4957
4958 "}
4959 );
4960 });
4961 subagent_acp_thread.read_with(cx, |thread, cx| {
4962 assert_eq!(thread.status(), ThreadStatus::Idle);
4963 assert_eq!(
4964 thread.to_markdown(cx),
4965 indoc! {"
4966 ## User
4967
4968 subagent task prompt
4969
4970 "}
4971 );
4972 });
4973}
4974
4975#[gpui::test]
4976async fn test_subagent_tool_resume_session(cx: &mut TestAppContext) {
4977 init_test(cx);
4978 cx.update(|cx| {
4979 LanguageModelRegistry::test(cx);
4980 });
4981 cx.update(|cx| {
4982 cx.update_flags(true, vec!["subagents".to_string()]);
4983 });
4984
4985 let fs = FakeFs::new(cx.executor());
4986 fs.insert_tree(
4987 "/",
4988 json!({
4989 "a": {
4990 "b.md": "Lorem"
4991 }
4992 }),
4993 )
4994 .await;
4995 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
4996 let thread_store = cx.new(|cx| ThreadStore::new(cx));
4997 let agent = cx.update(|cx| {
4998 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
4999 });
5000 let connection = Rc::new(NativeAgentConnection(agent.clone()));
5001
5002 let acp_thread = cx
5003 .update(|cx| {
5004 connection
5005 .clone()
5006 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
5007 })
5008 .await
5009 .unwrap();
5010 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
5011 let thread = agent.read_with(cx, |agent, _| {
5012 agent.sessions.get(&session_id).unwrap().thread.clone()
5013 });
5014 let model = Arc::new(FakeLanguageModel::default());
5015
5016 thread.update(cx, |thread, cx| {
5017 thread.set_model(model.clone(), cx);
5018 });
5019 cx.run_until_parked();
5020
5021 // === First turn: create subagent ===
5022 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("First prompt", cx));
5023 cx.run_until_parked();
5024 model.send_last_completion_stream_text_chunk("spawning subagent");
5025 let subagent_tool_input = SpawnAgentToolInput {
5026 label: "initial task".to_string(),
5027 message: "do the first task".to_string(),
5028 session_id: None,
5029 };
5030 let subagent_tool_use = LanguageModelToolUse {
5031 id: "subagent_1".into(),
5032 name: SpawnAgentTool::NAME.into(),
5033 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
5034 input: serde_json::to_value(&subagent_tool_input).unwrap(),
5035 is_input_complete: true,
5036 thought_signature: None,
5037 };
5038 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
5039 subagent_tool_use,
5040 ));
5041 model.end_last_completion_stream();
5042
5043 cx.run_until_parked();
5044
5045 let subagent_session_id = thread.read_with(cx, |thread, cx| {
5046 thread
5047 .running_subagent_ids(cx)
5048 .get(0)
5049 .expect("subagent thread should be running")
5050 .clone()
5051 });
5052
5053 let subagent_acp_thread = agent.read_with(cx, |agent, _cx| {
5054 agent
5055 .sessions
5056 .get(&subagent_session_id)
5057 .expect("subagent session should exist")
5058 .acp_thread
5059 .clone()
5060 });
5061
5062 // Subagent responds
5063 model.send_last_completion_stream_text_chunk("first task response");
5064 model.end_last_completion_stream();
5065
5066 cx.run_until_parked();
5067
5068 // Parent model responds to complete first turn
5069 model.send_last_completion_stream_text_chunk("First response");
5070 model.end_last_completion_stream();
5071
5072 send.await.unwrap();
5073
5074 // Verify subagent is no longer running
5075 thread.read_with(cx, |thread, cx| {
5076 assert!(
5077 thread.running_subagent_ids(cx).is_empty(),
5078 "subagent should not be running after completion"
5079 );
5080 });
5081
5082 // === Second turn: resume subagent with session_id ===
5083 let send2 = acp_thread.update(cx, |thread, cx| thread.send_raw("Follow up", cx));
5084 cx.run_until_parked();
5085 model.send_last_completion_stream_text_chunk("resuming subagent");
5086 let resume_tool_input = SpawnAgentToolInput {
5087 label: "follow-up task".to_string(),
5088 message: "do the follow-up task".to_string(),
5089 session_id: Some(subagent_session_id.clone()),
5090 };
5091 let resume_tool_use = LanguageModelToolUse {
5092 id: "subagent_2".into(),
5093 name: SpawnAgentTool::NAME.into(),
5094 raw_input: serde_json::to_string(&resume_tool_input).unwrap(),
5095 input: serde_json::to_value(&resume_tool_input).unwrap(),
5096 is_input_complete: true,
5097 thought_signature: None,
5098 };
5099 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(resume_tool_use));
5100 model.end_last_completion_stream();
5101
5102 cx.run_until_parked();
5103
5104 // Subagent should be running again with the same session
5105 thread.read_with(cx, |thread, cx| {
5106 let running = thread.running_subagent_ids(cx);
5107 assert_eq!(running.len(), 1, "subagent should be running");
5108 assert_eq!(running[0], subagent_session_id, "should be same session");
5109 });
5110
5111 // Subagent responds to follow-up
5112 model.send_last_completion_stream_text_chunk("follow-up task response");
5113 model.end_last_completion_stream();
5114
5115 cx.run_until_parked();
5116
5117 // Parent model responds to complete second turn
5118 model.send_last_completion_stream_text_chunk("Second response");
5119 model.end_last_completion_stream();
5120
5121 send2.await.unwrap();
5122
5123 // Verify subagent is no longer running
5124 thread.read_with(cx, |thread, cx| {
5125 assert!(
5126 thread.running_subagent_ids(cx).is_empty(),
5127 "subagent should not be running after resume completion"
5128 );
5129 });
5130
5131 // Verify the subagent's acp thread has both conversation turns
5132 assert_eq!(
5133 subagent_acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx)),
5134 indoc! {"
5135 ## User
5136
5137 do the first task
5138
5139 ## Assistant
5140
5141 first task response
5142
5143 ## User
5144
5145 do the follow-up task
5146
5147 ## Assistant
5148
5149 follow-up task response
5150
5151 "}
5152 );
5153}
5154
5155#[gpui::test]
5156async fn test_subagent_thread_inherits_parent_thread_properties(cx: &mut TestAppContext) {
5157 init_test(cx);
5158
5159 cx.update(|cx| {
5160 cx.update_flags(true, vec!["subagents".to_string()]);
5161 });
5162
5163 let fs = FakeFs::new(cx.executor());
5164 fs.insert_tree(path!("/test"), json!({})).await;
5165 let project = Project::test(fs, [path!("/test").as_ref()], cx).await;
5166 let project_context = cx.new(|_cx| ProjectContext::default());
5167 let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
5168 let context_server_registry =
5169 cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
5170 let model = Arc::new(FakeLanguageModel::default());
5171
5172 let parent_thread = cx.new(|cx| {
5173 Thread::new(
5174 project.clone(),
5175 project_context,
5176 context_server_registry,
5177 Templates::new(),
5178 Some(model.clone()),
5179 cx,
5180 )
5181 });
5182
5183 let subagent_thread = cx.new(|cx| Thread::new_subagent(&parent_thread, cx));
5184 subagent_thread.read_with(cx, |subagent_thread, cx| {
5185 assert!(subagent_thread.is_subagent());
5186 assert_eq!(subagent_thread.depth(), 1);
5187 assert_eq!(
5188 subagent_thread.model().map(|model| model.id()),
5189 Some(model.id())
5190 );
5191 assert_eq!(
5192 subagent_thread.parent_thread_id(),
5193 Some(parent_thread.read(cx).id().clone())
5194 );
5195
5196 let request = subagent_thread
5197 .build_completion_request(CompletionIntent::UserPrompt, cx)
5198 .unwrap();
5199 assert_eq!(request.intent, Some(CompletionIntent::Subagent));
5200 });
5201}
5202
5203#[gpui::test]
5204async fn test_max_subagent_depth_prevents_tool_registration(cx: &mut TestAppContext) {
5205 init_test(cx);
5206
5207 cx.update(|cx| {
5208 cx.update_flags(true, vec!["subagents".to_string()]);
5209 });
5210
5211 let fs = FakeFs::new(cx.executor());
5212 fs.insert_tree(path!("/test"), json!({})).await;
5213 let project = Project::test(fs, [path!("/test").as_ref()], cx).await;
5214 let project_context = cx.new(|_cx| ProjectContext::default());
5215 let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
5216 let context_server_registry =
5217 cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
5218 let model = Arc::new(FakeLanguageModel::default());
5219 let environment = Rc::new(cx.update(|cx| {
5220 FakeThreadEnvironment::default().with_terminal(FakeTerminalHandle::new_never_exits(cx))
5221 }));
5222
5223 let deep_parent_thread = cx.new(|cx| {
5224 let mut thread = Thread::new(
5225 project.clone(),
5226 project_context,
5227 context_server_registry,
5228 Templates::new(),
5229 Some(model.clone()),
5230 cx,
5231 );
5232 thread.set_subagent_context(SubagentContext {
5233 parent_thread_id: agent_client_protocol::SessionId::new("parent-id"),
5234 depth: MAX_SUBAGENT_DEPTH - 1,
5235 });
5236 thread
5237 });
5238 let deep_subagent_thread = cx.new(|cx| {
5239 let mut thread = Thread::new_subagent(&deep_parent_thread, cx);
5240 thread.add_default_tools(environment, cx);
5241 thread
5242 });
5243
5244 deep_subagent_thread.read_with(cx, |thread, _| {
5245 assert_eq!(thread.depth(), MAX_SUBAGENT_DEPTH);
5246 assert!(
5247 !thread.has_registered_tool(SpawnAgentTool::NAME),
5248 "subagent tool should not be present at max depth"
5249 );
5250 });
5251}
5252
5253#[gpui::test]
5254async fn test_parent_cancel_stops_subagent(cx: &mut TestAppContext) {
5255 init_test(cx);
5256
5257 cx.update(|cx| {
5258 cx.update_flags(true, vec!["subagents".to_string()]);
5259 });
5260
5261 let fs = FakeFs::new(cx.executor());
5262 fs.insert_tree(path!("/test"), json!({})).await;
5263 let project = Project::test(fs, [path!("/test").as_ref()], cx).await;
5264 let project_context = cx.new(|_cx| ProjectContext::default());
5265 let context_server_store = project.read_with(cx, |project, _| project.context_server_store());
5266 let context_server_registry =
5267 cx.new(|cx| ContextServerRegistry::new(context_server_store.clone(), cx));
5268 let model = Arc::new(FakeLanguageModel::default());
5269
5270 let parent = cx.new(|cx| {
5271 Thread::new(
5272 project.clone(),
5273 project_context.clone(),
5274 context_server_registry.clone(),
5275 Templates::new(),
5276 Some(model.clone()),
5277 cx,
5278 )
5279 });
5280
5281 let subagent = cx.new(|cx| Thread::new_subagent(&parent, cx));
5282
5283 parent.update(cx, |thread, _cx| {
5284 thread.register_running_subagent(subagent.downgrade());
5285 });
5286
5287 subagent
5288 .update(cx, |thread, cx| {
5289 thread.send(UserMessageId::new(), ["Do work".to_string()], cx)
5290 })
5291 .unwrap();
5292 cx.run_until_parked();
5293
5294 subagent.read_with(cx, |thread, _| {
5295 assert!(!thread.is_turn_complete(), "subagent should be running");
5296 });
5297
5298 parent.update(cx, |thread, cx| {
5299 thread.cancel(cx).detach();
5300 });
5301
5302 subagent.read_with(cx, |thread, _| {
5303 assert!(
5304 thread.is_turn_complete(),
5305 "subagent should be cancelled when parent cancels"
5306 );
5307 });
5308}
5309
5310#[gpui::test]
5311async fn test_subagent_context_window_warning(cx: &mut TestAppContext) {
5312 init_test(cx);
5313 cx.update(|cx| {
5314 LanguageModelRegistry::test(cx);
5315 });
5316 cx.update(|cx| {
5317 cx.update_flags(true, vec!["subagents".to_string()]);
5318 });
5319
5320 let fs = FakeFs::new(cx.executor());
5321 fs.insert_tree(
5322 "/",
5323 json!({
5324 "a": {
5325 "b.md": "Lorem"
5326 }
5327 }),
5328 )
5329 .await;
5330 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
5331 let thread_store = cx.new(|cx| ThreadStore::new(cx));
5332 let agent = cx.update(|cx| {
5333 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
5334 });
5335 let connection = Rc::new(NativeAgentConnection(agent.clone()));
5336
5337 let acp_thread = cx
5338 .update(|cx| {
5339 connection
5340 .clone()
5341 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
5342 })
5343 .await
5344 .unwrap();
5345 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
5346 let thread = agent.read_with(cx, |agent, _| {
5347 agent.sessions.get(&session_id).unwrap().thread.clone()
5348 });
5349 let model = Arc::new(FakeLanguageModel::default());
5350
5351 thread.update(cx, |thread, cx| {
5352 thread.set_model(model.clone(), cx);
5353 });
5354 cx.run_until_parked();
5355
5356 // Start the parent turn
5357 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
5358 cx.run_until_parked();
5359 model.send_last_completion_stream_text_chunk("spawning subagent");
5360 let subagent_tool_input = SpawnAgentToolInput {
5361 label: "label".to_string(),
5362 message: "subagent task prompt".to_string(),
5363 session_id: None,
5364 };
5365 let subagent_tool_use = LanguageModelToolUse {
5366 id: "subagent_1".into(),
5367 name: SpawnAgentTool::NAME.into(),
5368 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
5369 input: serde_json::to_value(&subagent_tool_input).unwrap(),
5370 is_input_complete: true,
5371 thought_signature: None,
5372 };
5373 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
5374 subagent_tool_use,
5375 ));
5376 model.end_last_completion_stream();
5377
5378 cx.run_until_parked();
5379
5380 // Verify subagent is running
5381 let subagent_session_id = thread.read_with(cx, |thread, cx| {
5382 thread
5383 .running_subagent_ids(cx)
5384 .get(0)
5385 .expect("subagent thread should be running")
5386 .clone()
5387 });
5388
5389 // Send a usage update that crosses the warning threshold (80% of 1,000,000)
5390 model.send_last_completion_stream_text_chunk("partial work");
5391 model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
5392 TokenUsage {
5393 input_tokens: 850_000,
5394 output_tokens: 0,
5395 cache_creation_input_tokens: 0,
5396 cache_read_input_tokens: 0,
5397 },
5398 ));
5399
5400 cx.run_until_parked();
5401
5402 // The subagent should no longer be running
5403 thread.read_with(cx, |thread, cx| {
5404 assert!(
5405 thread.running_subagent_ids(cx).is_empty(),
5406 "subagent should be stopped after context window warning"
5407 );
5408 });
5409
5410 // The parent model should get a new completion request to respond to the tool error
5411 model.send_last_completion_stream_text_chunk("Response after warning");
5412 model.end_last_completion_stream();
5413
5414 send.await.unwrap();
5415
5416 // Verify the parent thread shows the warning error in the tool call
5417 let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
5418 assert!(
5419 markdown.contains("nearing the end of its context window"),
5420 "tool output should contain context window warning message, got:\n{markdown}"
5421 );
5422 assert!(
5423 markdown.contains("Status: Failed"),
5424 "tool call should have Failed status, got:\n{markdown}"
5425 );
5426
5427 // Verify the subagent session still exists (can be resumed)
5428 agent.read_with(cx, |agent, _cx| {
5429 assert!(
5430 agent.sessions.contains_key(&subagent_session_id),
5431 "subagent session should still exist for potential resume"
5432 );
5433 });
5434}
5435
5436#[gpui::test]
5437async fn test_subagent_no_context_window_warning_when_already_at_warning(cx: &mut TestAppContext) {
5438 init_test(cx);
5439 cx.update(|cx| {
5440 LanguageModelRegistry::test(cx);
5441 });
5442 cx.update(|cx| {
5443 cx.update_flags(true, vec!["subagents".to_string()]);
5444 });
5445
5446 let fs = FakeFs::new(cx.executor());
5447 fs.insert_tree(
5448 "/",
5449 json!({
5450 "a": {
5451 "b.md": "Lorem"
5452 }
5453 }),
5454 )
5455 .await;
5456 let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
5457 let thread_store = cx.new(|cx| ThreadStore::new(cx));
5458 let agent = cx.update(|cx| {
5459 NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
5460 });
5461 let connection = Rc::new(NativeAgentConnection(agent.clone()));
5462
5463 let acp_thread = cx
5464 .update(|cx| {
5465 connection
5466 .clone()
5467 .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
5468 })
5469 .await
5470 .unwrap();
5471 let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
5472 let thread = agent.read_with(cx, |agent, _| {
5473 agent.sessions.get(&session_id).unwrap().thread.clone()
5474 });
5475 let model = Arc::new(FakeLanguageModel::default());
5476
5477 thread.update(cx, |thread, cx| {
5478 thread.set_model(model.clone(), cx);
5479 });
5480 cx.run_until_parked();
5481
5482 // === First turn: create subagent, trigger context window warning ===
5483 let send = acp_thread.update(cx, |thread, cx| thread.send_raw("First prompt", cx));
5484 cx.run_until_parked();
5485 model.send_last_completion_stream_text_chunk("spawning subagent");
5486 let subagent_tool_input = SpawnAgentToolInput {
5487 label: "initial task".to_string(),
5488 message: "do the first task".to_string(),
5489 session_id: None,
5490 };
5491 let subagent_tool_use = LanguageModelToolUse {
5492 id: "subagent_1".into(),
5493 name: SpawnAgentTool::NAME.into(),
5494 raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
5495 input: serde_json::to_value(&subagent_tool_input).unwrap(),
5496 is_input_complete: true,
5497 thought_signature: None,
5498 };
5499 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
5500 subagent_tool_use,
5501 ));
5502 model.end_last_completion_stream();
5503
5504 cx.run_until_parked();
5505
5506 let subagent_session_id = thread.read_with(cx, |thread, cx| {
5507 thread
5508 .running_subagent_ids(cx)
5509 .get(0)
5510 .expect("subagent thread should be running")
5511 .clone()
5512 });
5513
5514 // Subagent sends a usage update that crosses the warning threshold.
5515 // This triggers Normal→Warning, stopping the subagent.
5516 model.send_last_completion_stream_text_chunk("partial work");
5517 model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
5518 TokenUsage {
5519 input_tokens: 850_000,
5520 output_tokens: 0,
5521 cache_creation_input_tokens: 0,
5522 cache_read_input_tokens: 0,
5523 },
5524 ));
5525
5526 cx.run_until_parked();
5527
5528 // Verify the first turn was stopped with a context window warning
5529 thread.read_with(cx, |thread, cx| {
5530 assert!(
5531 thread.running_subagent_ids(cx).is_empty(),
5532 "subagent should be stopped after context window warning"
5533 );
5534 });
5535
5536 // Parent model responds to complete first turn
5537 model.send_last_completion_stream_text_chunk("First response");
5538 model.end_last_completion_stream();
5539
5540 send.await.unwrap();
5541
5542 let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
5543 assert!(
5544 markdown.contains("nearing the end of its context window"),
5545 "first turn should have context window warning, got:\n{markdown}"
5546 );
5547
5548 // === Second turn: resume the same subagent (now at Warning level) ===
5549 let send2 = acp_thread.update(cx, |thread, cx| thread.send_raw("Follow up", cx));
5550 cx.run_until_parked();
5551 model.send_last_completion_stream_text_chunk("resuming subagent");
5552 let resume_tool_input = SpawnAgentToolInput {
5553 label: "follow-up task".to_string(),
5554 message: "do the follow-up task".to_string(),
5555 session_id: Some(subagent_session_id.clone()),
5556 };
5557 let resume_tool_use = LanguageModelToolUse {
5558 id: "subagent_2".into(),
5559 name: SpawnAgentTool::NAME.into(),
5560 raw_input: serde_json::to_string(&resume_tool_input).unwrap(),
5561 input: serde_json::to_value(&resume_tool_input).unwrap(),
5562 is_input_complete: true,
5563 thought_signature: None,
5564 };
5565 model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(resume_tool_use));
5566 model.end_last_completion_stream();
5567
5568 cx.run_until_parked();
5569
5570 // Subagent responds with tokens still at warning level (no worse).
5571 // Since ratio_before_prompt was already Warning, this should NOT
5572 // trigger the context window warning again.
5573 model.send_last_completion_stream_text_chunk("follow-up task response");
5574 model.send_last_completion_stream_event(LanguageModelCompletionEvent::UsageUpdate(
5575 TokenUsage {
5576 input_tokens: 870_000,
5577 output_tokens: 0,
5578 cache_creation_input_tokens: 0,
5579 cache_read_input_tokens: 0,
5580 },
5581 ));
5582 model.end_last_completion_stream();
5583
5584 cx.run_until_parked();
5585
5586 // Parent model responds to complete second turn
5587 model.send_last_completion_stream_text_chunk("Second response");
5588 model.end_last_completion_stream();
5589
5590 send2.await.unwrap();
5591
5592 // The resumed subagent should have completed normally since the ratio
5593 // didn't transition (it was Warning before and stayed at Warning)
5594 let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
5595 assert!(
5596 markdown.contains("follow-up task response"),
5597 "resumed subagent should complete normally when already at warning, got:\n{markdown}"
5598 );
5599 // The second tool call should NOT have a context window warning
5600 let second_tool_pos = markdown
5601 .find("follow-up task")
5602 .expect("should find follow-up tool call");
5603 let after_second_tool = &markdown[second_tool_pos..];
5604 assert!(
5605 !after_second_tool.contains("nearing the end of its context window"),
5606 "should NOT contain context window warning for resumed subagent at same level, got:\n{after_second_tool}"
5607 );
5608}
5609
#[gpui::test]
async fn test_subagent_error_propagation(cx: &mut TestAppContext) {
    // A non-retryable model error inside a subagent must stop the subagent and
    // surface as a failed tool call on the parent thread.
    init_test(cx);
    cx.update(|cx| {
        LanguageModelRegistry::test(cx);
    });
    cx.update(|cx| {
        cx.update_flags(true, vec!["subagents".to_string()]);
    });

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/",
        json!({
            "a": {
                "b.md": "Lorem"
            }
        }),
    )
    .await;
    let project = Project::test(fs.clone(), [path!("/a").as_ref()], cx).await;
    let thread_store = cx.new(|cx| ThreadStore::new(cx));
    let agent = cx.update(|cx| {
        NativeAgent::new(thread_store.clone(), Templates::new(), None, fs.clone(), cx)
    });
    let connection = Rc::new(NativeAgentConnection(agent.clone()));

    // Open a session and grab the underlying Thread entity for it.
    let acp_thread = cx
        .update(|cx| {
            connection
                .clone()
                .new_session(project.clone(), PathList::new(&[Path::new("")]), cx)
        })
        .await
        .unwrap();
    let session_id = acp_thread.read_with(cx, |thread, _| thread.session_id().clone());
    let thread = agent.read_with(cx, |agent, _| {
        agent.sessions.get(&session_id).unwrap().thread.clone()
    });
    let model = Arc::new(FakeLanguageModel::default());

    thread.update(cx, |thread, cx| {
        thread.set_model(model.clone(), cx);
    });
    cx.run_until_parked();

    // Start the parent turn
    let send = acp_thread.update(cx, |thread, cx| thread.send_raw("Prompt", cx));
    cx.run_until_parked();
    // The parent model's response spawns a subagent via a tool use.
    model.send_last_completion_stream_text_chunk("spawning subagent");
    let subagent_tool_input = SpawnAgentToolInput {
        label: "label".to_string(),
        message: "subagent task prompt".to_string(),
        session_id: None,
    };
    let subagent_tool_use = LanguageModelToolUse {
        id: "subagent_1".into(),
        name: SpawnAgentTool::NAME.into(),
        raw_input: serde_json::to_string(&subagent_tool_input).unwrap(),
        input: serde_json::to_value(&subagent_tool_input).unwrap(),
        is_input_complete: true,
        thought_signature: None,
    };
    model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        subagent_tool_use,
    ));
    model.end_last_completion_stream();

    cx.run_until_parked();

    // Verify subagent is running
    thread.read_with(cx, |thread, cx| {
        assert!(
            !thread.running_subagent_ids(cx).is_empty(),
            "subagent should be running"
        );
    });

    // The subagent's model returns a non-retryable error
    model.send_last_completion_stream_error(LanguageModelCompletionError::PromptTooLarge {
        tokens: None,
    });

    cx.run_until_parked();

    // The subagent should no longer be running
    thread.read_with(cx, |thread, cx| {
        assert!(
            thread.running_subagent_ids(cx).is_empty(),
            "subagent should not be running after error"
        );
    });

    // The parent model should get a new completion request to respond to the tool error
    model.send_last_completion_stream_text_chunk("Response after error");
    model.end_last_completion_stream();

    send.await.unwrap();

    // Verify the parent thread shows the error in the tool call
    let markdown = acp_thread.read_with(cx, |thread, cx| thread.to_markdown(cx));
    assert!(
        markdown.contains("Status: Failed"),
        "tool call should have Failed status after model error, got:\n{markdown}"
    );
}
5716
#[gpui::test]
async fn test_edit_file_tool_deny_rule_blocks_edit(cx: &mut TestAppContext) {
    // An always_deny regex on EditFileTool must block edits to matching paths
    // even when the tool's default permission mode is Allow.
    init_test(cx);

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree("/root", json!({"sensitive_config.txt": "secret data"}))
        .await;
    let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;

    // Configure: allow by default, but deny any path matching "sensitive".
    cx.update(|cx| {
        let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
        settings.tool_permissions.tools.insert(
            EditFileTool::NAME.into(),
            agent_settings::ToolRules {
                default: Some(settings::ToolPermissionMode::Allow),
                always_allow: vec![],
                always_deny: vec![agent_settings::CompiledRegex::new(r"sensitive", false).unwrap()],
                always_confirm: vec![],
                invalid_patterns: vec![],
            },
        );
        agent_settings::AgentSettings::override_global(settings, cx);
    });

    // EditFileTool needs a Thread; build a minimal one with no model attached.
    let context_server_registry =
        cx.new(|cx| crate::ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
    let language_registry = project.read_with(cx, |project, _cx| project.languages().clone());
    let templates = crate::Templates::new();
    let thread = cx.new(|cx| {
        crate::Thread::new(
            project.clone(),
            cx.new(|_cx| prompt_store::ProjectContext::default()),
            context_server_registry,
            templates.clone(),
            None,
            cx,
        )
    });

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::EditFileTool::new(
        project.clone(),
        thread.downgrade(),
        language_registry,
        templates,
    ));
    let (event_stream, _rx) = crate::ToolCallEventStream::test();

    // Attempt to edit the file whose path matches the deny rule.
    let task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::EditFileToolInput {
                display_description: "Edit sensitive file".to_string(),
                path: "root/sensitive_config.txt".into(),
                mode: crate::EditFileMode::Edit,
            }),
            event_stream,
            cx,
        )
    });

    let result = task.await;
    assert!(result.is_err(), "expected edit to be blocked");
    assert!(
        result.unwrap_err().to_string().contains("blocked"),
        "error should mention the edit was blocked"
    );
}
5784
5785#[gpui::test]
5786async fn test_delete_path_tool_deny_rule_blocks_deletion(cx: &mut TestAppContext) {
5787 init_test(cx);
5788
5789 let fs = FakeFs::new(cx.executor());
5790 fs.insert_tree("/root", json!({"important_data.txt": "critical info"}))
5791 .await;
5792 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
5793
5794 cx.update(|cx| {
5795 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
5796 settings.tool_permissions.tools.insert(
5797 DeletePathTool::NAME.into(),
5798 agent_settings::ToolRules {
5799 default: Some(settings::ToolPermissionMode::Allow),
5800 always_allow: vec![],
5801 always_deny: vec![agent_settings::CompiledRegex::new(r"important", false).unwrap()],
5802 always_confirm: vec![],
5803 invalid_patterns: vec![],
5804 },
5805 );
5806 agent_settings::AgentSettings::override_global(settings, cx);
5807 });
5808
5809 let action_log = cx.new(|_cx| action_log::ActionLog::new(project.clone()));
5810
5811 #[allow(clippy::arc_with_non_send_sync)]
5812 let tool = Arc::new(crate::DeletePathTool::new(project, action_log));
5813 let (event_stream, _rx) = crate::ToolCallEventStream::test();
5814
5815 let task = cx.update(|cx| {
5816 tool.run(
5817 ToolInput::resolved(crate::DeletePathToolInput {
5818 path: "root/important_data.txt".to_string(),
5819 }),
5820 event_stream,
5821 cx,
5822 )
5823 });
5824
5825 let result = task.await;
5826 assert!(result.is_err(), "expected deletion to be blocked");
5827 assert!(
5828 result.unwrap_err().contains("blocked"),
5829 "error should mention the deletion was blocked"
5830 );
5831}
5832
5833#[gpui::test]
5834async fn test_move_path_tool_denies_if_destination_denied(cx: &mut TestAppContext) {
5835 init_test(cx);
5836
5837 let fs = FakeFs::new(cx.executor());
5838 fs.insert_tree(
5839 "/root",
5840 json!({
5841 "safe.txt": "content",
5842 "protected": {}
5843 }),
5844 )
5845 .await;
5846 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
5847
5848 cx.update(|cx| {
5849 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
5850 settings.tool_permissions.tools.insert(
5851 MovePathTool::NAME.into(),
5852 agent_settings::ToolRules {
5853 default: Some(settings::ToolPermissionMode::Allow),
5854 always_allow: vec![],
5855 always_deny: vec![agent_settings::CompiledRegex::new(r"protected", false).unwrap()],
5856 always_confirm: vec![],
5857 invalid_patterns: vec![],
5858 },
5859 );
5860 agent_settings::AgentSettings::override_global(settings, cx);
5861 });
5862
5863 #[allow(clippy::arc_with_non_send_sync)]
5864 let tool = Arc::new(crate::MovePathTool::new(project));
5865 let (event_stream, _rx) = crate::ToolCallEventStream::test();
5866
5867 let task = cx.update(|cx| {
5868 tool.run(
5869 ToolInput::resolved(crate::MovePathToolInput {
5870 source_path: "root/safe.txt".to_string(),
5871 destination_path: "root/protected/safe.txt".to_string(),
5872 }),
5873 event_stream,
5874 cx,
5875 )
5876 });
5877
5878 let result = task.await;
5879 assert!(
5880 result.is_err(),
5881 "expected move to be blocked due to destination path"
5882 );
5883 assert!(
5884 result.unwrap_err().contains("blocked"),
5885 "error should mention the move was blocked"
5886 );
5887}
5888
#[gpui::test]
async fn test_move_path_tool_denies_if_source_denied(cx: &mut TestAppContext) {
    // MovePathTool must refuse a move when a deny rule matches the SOURCE
    // path, even if the destination path is allowed.
    init_test(cx);

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "secret.txt": "secret content",
            "public": {}
        }),
    )
    .await;
    let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;

    // Default-allow, but deny any path matching "secret".
    cx.update(|cx| {
        let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
        settings.tool_permissions.tools.insert(
            MovePathTool::NAME.into(),
            agent_settings::ToolRules {
                default: Some(settings::ToolPermissionMode::Allow),
                always_allow: vec![],
                always_deny: vec![agent_settings::CompiledRegex::new(r"secret", false).unwrap()],
                always_confirm: vec![],
                invalid_patterns: vec![],
            },
        );
        agent_settings::AgentSettings::override_global(settings, cx);
    });

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::MovePathTool::new(project));
    let (event_stream, _rx) = crate::ToolCallEventStream::test();

    // Source matches the deny regex; destination does not.
    let task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::MovePathToolInput {
                source_path: "root/secret.txt".to_string(),
                destination_path: "root/public/not_secret.txt".to_string(),
            }),
            event_stream,
            cx,
        )
    });

    let result = task.await;
    assert!(
        result.is_err(),
        "expected move to be blocked due to source path"
    );
    assert!(
        result.unwrap_err().contains("blocked"),
        "error should mention the move was blocked"
    );
}
5944
5945#[gpui::test]
5946async fn test_copy_path_tool_deny_rule_blocks_copy(cx: &mut TestAppContext) {
5947 init_test(cx);
5948
5949 let fs = FakeFs::new(cx.executor());
5950 fs.insert_tree(
5951 "/root",
5952 json!({
5953 "confidential.txt": "confidential data",
5954 "dest": {}
5955 }),
5956 )
5957 .await;
5958 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
5959
5960 cx.update(|cx| {
5961 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
5962 settings.tool_permissions.tools.insert(
5963 CopyPathTool::NAME.into(),
5964 agent_settings::ToolRules {
5965 default: Some(settings::ToolPermissionMode::Allow),
5966 always_allow: vec![],
5967 always_deny: vec![
5968 agent_settings::CompiledRegex::new(r"confidential", false).unwrap(),
5969 ],
5970 always_confirm: vec![],
5971 invalid_patterns: vec![],
5972 },
5973 );
5974 agent_settings::AgentSettings::override_global(settings, cx);
5975 });
5976
5977 #[allow(clippy::arc_with_non_send_sync)]
5978 let tool = Arc::new(crate::CopyPathTool::new(project));
5979 let (event_stream, _rx) = crate::ToolCallEventStream::test();
5980
5981 let task = cx.update(|cx| {
5982 tool.run(
5983 ToolInput::resolved(crate::CopyPathToolInput {
5984 source_path: "root/confidential.txt".to_string(),
5985 destination_path: "root/dest/copy.txt".to_string(),
5986 }),
5987 event_stream,
5988 cx,
5989 )
5990 });
5991
5992 let result = task.await;
5993 assert!(result.is_err(), "expected copy to be blocked");
5994 assert!(
5995 result.unwrap_err().contains("blocked"),
5996 "error should mention the copy was blocked"
5997 );
5998}
5999
#[gpui::test]
async fn test_save_file_tool_denies_if_any_path_denied(cx: &mut TestAppContext) {
    // SaveFileTool takes a batch of paths; if ANY of them matches a deny rule,
    // the whole save must be rejected.
    init_test(cx);

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            "normal.txt": "normal content",
            "readonly": {
                "config.txt": "readonly content"
            }
        }),
    )
    .await;
    let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;

    // Default-allow, but deny any path matching "readonly".
    cx.update(|cx| {
        let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
        settings.tool_permissions.tools.insert(
            SaveFileTool::NAME.into(),
            agent_settings::ToolRules {
                default: Some(settings::ToolPermissionMode::Allow),
                always_allow: vec![],
                always_deny: vec![agent_settings::CompiledRegex::new(r"readonly", false).unwrap()],
                always_confirm: vec![],
                invalid_patterns: vec![],
            },
        );
        agent_settings::AgentSettings::override_global(settings, cx);
    });

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::SaveFileTool::new(project));
    let (event_stream, _rx) = crate::ToolCallEventStream::test();

    // One allowed path plus one denied path — the denied one should poison the batch.
    let task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::SaveFileToolInput {
                paths: vec![
                    std::path::PathBuf::from("root/normal.txt"),
                    std::path::PathBuf::from("root/readonly/config.txt"),
                ],
            }),
            event_stream,
            cx,
        )
    });

    let result = task.await;
    assert!(
        result.is_err(),
        "expected save to be blocked due to denied path"
    );
    assert!(
        result.unwrap_err().contains("blocked"),
        "error should mention the save was blocked"
    );
}
6059
6060#[gpui::test]
6061async fn test_save_file_tool_respects_deny_rules(cx: &mut TestAppContext) {
6062 init_test(cx);
6063
6064 let fs = FakeFs::new(cx.executor());
6065 fs.insert_tree("/root", json!({"config.secret": "secret config"}))
6066 .await;
6067 let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;
6068
6069 cx.update(|cx| {
6070 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6071 settings.tool_permissions.tools.insert(
6072 SaveFileTool::NAME.into(),
6073 agent_settings::ToolRules {
6074 default: Some(settings::ToolPermissionMode::Allow),
6075 always_allow: vec![],
6076 always_deny: vec![agent_settings::CompiledRegex::new(r"\.secret$", false).unwrap()],
6077 always_confirm: vec![],
6078 invalid_patterns: vec![],
6079 },
6080 );
6081 agent_settings::AgentSettings::override_global(settings, cx);
6082 });
6083
6084 #[allow(clippy::arc_with_non_send_sync)]
6085 let tool = Arc::new(crate::SaveFileTool::new(project));
6086 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6087
6088 let task = cx.update(|cx| {
6089 tool.run(
6090 ToolInput::resolved(crate::SaveFileToolInput {
6091 paths: vec![std::path::PathBuf::from("root/config.secret")],
6092 }),
6093 event_stream,
6094 cx,
6095 )
6096 });
6097
6098 let result = task.await;
6099 assert!(result.is_err(), "expected save to be blocked");
6100 assert!(
6101 result.unwrap_err().contains("blocked"),
6102 "error should mention the save was blocked"
6103 );
6104}
6105
6106#[gpui::test]
6107async fn test_web_search_tool_deny_rule_blocks_search(cx: &mut TestAppContext) {
6108 init_test(cx);
6109
6110 cx.update(|cx| {
6111 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6112 settings.tool_permissions.tools.insert(
6113 WebSearchTool::NAME.into(),
6114 agent_settings::ToolRules {
6115 default: Some(settings::ToolPermissionMode::Allow),
6116 always_allow: vec![],
6117 always_deny: vec![
6118 agent_settings::CompiledRegex::new(r"internal\.company", false).unwrap(),
6119 ],
6120 always_confirm: vec![],
6121 invalid_patterns: vec![],
6122 },
6123 );
6124 agent_settings::AgentSettings::override_global(settings, cx);
6125 });
6126
6127 #[allow(clippy::arc_with_non_send_sync)]
6128 let tool = Arc::new(crate::WebSearchTool);
6129 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6130
6131 let input: crate::WebSearchToolInput =
6132 serde_json::from_value(json!({"query": "internal.company.com secrets"})).unwrap();
6133
6134 let task = cx.update(|cx| tool.run(ToolInput::resolved(input), event_stream, cx));
6135
6136 let result = task.await;
6137 assert!(result.is_err(), "expected search to be blocked");
6138 match result.unwrap_err() {
6139 crate::WebSearchToolOutput::Error { error } => {
6140 assert!(
6141 error.contains("blocked"),
6142 "error should mention the search was blocked"
6143 );
6144 }
6145 other => panic!("expected Error variant, got: {other:?}"),
6146 }
6147}
6148
#[gpui::test]
async fn test_edit_file_tool_allow_rule_skips_confirmation(cx: &mut TestAppContext) {
    // With a default of Confirm but an always_allow rule matching the path,
    // EditFileTool should proceed without emitting an authorization request.
    init_test(cx);

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree("/root", json!({"README.md": "# Hello"}))
        .await;
    let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;

    // Configure: confirm by default, but always allow paths ending in ".md".
    cx.update(|cx| {
        let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
        settings.tool_permissions.tools.insert(
            EditFileTool::NAME.into(),
            agent_settings::ToolRules {
                default: Some(settings::ToolPermissionMode::Confirm),
                always_allow: vec![agent_settings::CompiledRegex::new(r"\.md$", false).unwrap()],
                always_deny: vec![],
                always_confirm: vec![],
                invalid_patterns: vec![],
            },
        );
        agent_settings::AgentSettings::override_global(settings, cx);
    });

    // EditFileTool needs a Thread; build a minimal one with no model attached.
    let context_server_registry =
        cx.new(|cx| crate::ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
    let language_registry = project.read_with(cx, |project, _cx| project.languages().clone());
    let templates = crate::Templates::new();
    let thread = cx.new(|cx| {
        crate::Thread::new(
            project.clone(),
            cx.new(|_cx| prompt_store::ProjectContext::default()),
            context_server_registry,
            templates.clone(),
            None,
            cx,
        )
    });

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::EditFileTool::new(
        project,
        thread.downgrade(),
        language_registry,
        templates,
    ));
    let (event_stream, mut rx) = crate::ToolCallEventStream::test();

    // Kick off the edit; the task is intentionally left running — we only
    // care about whether an authorization event is emitted.
    let _task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::EditFileToolInput {
                display_description: "Edit README".to_string(),
                path: "root/README.md".into(),
                mode: crate::EditFileMode::Edit,
            }),
            event_stream,
            cx,
        )
    });

    cx.run_until_parked();

    // Whatever arrived first on the event stream must not be an authorization request.
    let event = rx.try_recv();
    assert!(
        !matches!(event, Ok(Ok(ThreadEvent::ToolCallAuthorization(_)))),
        "expected no authorization request for allowed .md file"
    );
}
6217
#[gpui::test]
async fn test_edit_file_tool_allow_still_prompts_for_local_settings(cx: &mut TestAppContext) {
    // Even with a global default of Allow, editing files under .zed/ must
    // still require explicit user authorization.
    init_test(cx);

    let fs = FakeFs::new(cx.executor());
    fs.insert_tree(
        "/root",
        json!({
            ".zed": {
                "settings.json": "{}"
            },
            "README.md": "# Hello"
        }),
    )
    .await;
    let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await;

    // Set the global default permission mode to Allow for all tools.
    cx.update(|cx| {
        let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
        settings.tool_permissions.default = settings::ToolPermissionMode::Allow;
        agent_settings::AgentSettings::override_global(settings, cx);
    });

    // EditFileTool needs a Thread; build a minimal one with no model attached.
    let context_server_registry =
        cx.new(|cx| crate::ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
    let language_registry = project.read_with(cx, |project, _cx| project.languages().clone());
    let templates = crate::Templates::new();
    let thread = cx.new(|cx| {
        crate::Thread::new(
            project.clone(),
            cx.new(|_cx| prompt_store::ProjectContext::default()),
            context_server_registry,
            templates.clone(),
            None,
            cx,
        )
    });

    #[allow(clippy::arc_with_non_send_sync)]
    let tool = Arc::new(crate::EditFileTool::new(
        project,
        thread.downgrade(),
        language_registry,
        templates,
    ));

    // Editing a file inside .zed/ should still prompt even with global default: allow,
    // because local settings paths are sensitive and require confirmation regardless.
    let (event_stream, mut rx) = crate::ToolCallEventStream::test();
    let _task = cx.update(|cx| {
        tool.run(
            ToolInput::resolved(crate::EditFileToolInput {
                display_description: "Edit local settings".to_string(),
                path: "root/.zed/settings.json".into(),
                mode: crate::EditFileMode::Edit,
            }),
            event_stream,
            cx,
        )
    });

    // The stream must first report the tool-call fields, then ask for
    // authorization; `expect_authorization` panics if no prompt arrives.
    let _update = rx.expect_update_fields().await;
    let _auth = rx.expect_authorization().await;
}
6282
6283#[gpui::test]
6284async fn test_fetch_tool_deny_rule_blocks_url(cx: &mut TestAppContext) {
6285 init_test(cx);
6286
6287 cx.update(|cx| {
6288 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6289 settings.tool_permissions.tools.insert(
6290 FetchTool::NAME.into(),
6291 agent_settings::ToolRules {
6292 default: Some(settings::ToolPermissionMode::Allow),
6293 always_allow: vec![],
6294 always_deny: vec![
6295 agent_settings::CompiledRegex::new(r"internal\.company\.com", false).unwrap(),
6296 ],
6297 always_confirm: vec![],
6298 invalid_patterns: vec![],
6299 },
6300 );
6301 agent_settings::AgentSettings::override_global(settings, cx);
6302 });
6303
6304 let http_client = gpui::http_client::FakeHttpClient::with_200_response();
6305
6306 #[allow(clippy::arc_with_non_send_sync)]
6307 let tool = Arc::new(crate::FetchTool::new(http_client));
6308 let (event_stream, _rx) = crate::ToolCallEventStream::test();
6309
6310 let input: crate::FetchToolInput =
6311 serde_json::from_value(json!({"url": "https://internal.company.com/api"})).unwrap();
6312
6313 let task = cx.update(|cx| tool.run(ToolInput::resolved(input), event_stream, cx));
6314
6315 let result = task.await;
6316 assert!(result.is_err(), "expected fetch to be blocked");
6317 assert!(
6318 result.unwrap_err().contains("blocked"),
6319 "error should mention the fetch was blocked"
6320 );
6321}
6322
6323#[gpui::test]
6324async fn test_fetch_tool_allow_rule_skips_confirmation(cx: &mut TestAppContext) {
6325 init_test(cx);
6326
6327 cx.update(|cx| {
6328 let mut settings = agent_settings::AgentSettings::get_global(cx).clone();
6329 settings.tool_permissions.tools.insert(
6330 FetchTool::NAME.into(),
6331 agent_settings::ToolRules {
6332 default: Some(settings::ToolPermissionMode::Confirm),
6333 always_allow: vec![agent_settings::CompiledRegex::new(r"docs\.rs", false).unwrap()],
6334 always_deny: vec![],
6335 always_confirm: vec![],
6336 invalid_patterns: vec![],
6337 },
6338 );
6339 agent_settings::AgentSettings::override_global(settings, cx);
6340 });
6341
6342 let http_client = gpui::http_client::FakeHttpClient::with_200_response();
6343
6344 #[allow(clippy::arc_with_non_send_sync)]
6345 let tool = Arc::new(crate::FetchTool::new(http_client));
6346 let (event_stream, mut rx) = crate::ToolCallEventStream::test();
6347
6348 let input: crate::FetchToolInput =
6349 serde_json::from_value(json!({"url": "https://docs.rs/some-crate"})).unwrap();
6350
6351 let _task = cx.update(|cx| tool.run(ToolInput::resolved(input), event_stream, cx));
6352
6353 cx.run_until_parked();
6354
6355 let event = rx.try_recv();
6356 assert!(
6357 !matches!(event, Ok(Ok(ThreadEvent::ToolCallAuthorization(_)))),
6358 "expected no authorization request for allowed docs.rs URL"
6359 );
6360}
6361
// Verifies that queueing a message mid-turn makes the agent loop stop at the
// next tool-completion boundary with EndTurn instead of issuing another model
// request, and that the queued flag survives for the caller to consume.
#[gpui::test]
async fn test_queued_message_ends_turn_at_boundary(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // Add a tool so we can simulate tool calls
    thread.update(cx, |thread, _cx| {
        thread.add_tool(EchoTool);
    });

    // Start a turn by sending a message
    let mut events = thread
        .update(cx, |thread, cx| {
            thread.send(UserMessageId::new(), ["Use the echo tool"], cx)
        })
        .unwrap();
    cx.run_until_parked();

    // Simulate the model making a tool call
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "tool_1".into(),
            name: "echo".into(),
            raw_input: r#"{"text": "hello"}"#.into(),
            input: json!({"text": "hello"}),
            is_input_complete: true,
            thought_signature: None,
        },
    ));
    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::Stop(StopReason::ToolUse));

    // Signal that a message is queued before ending the stream
    // (ordering matters: the flag must be visible when the boundary check runs)
    thread.update(cx, |thread, _cx| {
        thread.set_has_queued_message(true);
    });

    // Now end the stream - tool will run, and the boundary check should see the queue
    fake_model.end_last_completion_stream();

    // Collect all events until the turn stops
    let all_events = collect_events_until_stop(&mut events, cx).await;

    // Verify we received the tool call event
    let tool_call_ids: Vec<_> = all_events
        .iter()
        .filter_map(|e| match e {
            Ok(ThreadEvent::ToolCall(tc)) => Some(tc.tool_call_id.to_string()),
            _ => None,
        })
        .collect();
    assert_eq!(
        tool_call_ids,
        vec!["tool_1"],
        "Should have received a tool call event for our echo tool"
    );

    // The turn should have stopped with EndTurn
    let stop_reasons = stop_events(all_events);
    assert_eq!(
        stop_reasons,
        vec![acp::StopReason::EndTurn],
        "Turn should have ended after tool completion due to queued message"
    );

    // Verify the queued message flag is still set
    // (ending the turn must not consume the flag — the caller does that)
    thread.update(cx, |thread, _cx| {
        assert!(
            thread.has_queued_message(),
            "Should still have queued message flag set"
        );
    });

    // Thread should be idle now
    thread.update(cx, |thread, _cx| {
        assert!(
            thread.is_turn_complete(),
            "Thread should not be running after turn ends"
        );
    });
}
6446
// Verifies that a streaming tool failing mid-stream (while its input is still
// incomplete and the model's stream is still open) breaks the stream loop
// right away: the next request to the model already carries an is_error tool
// result for the failed call.
#[gpui::test]
async fn test_streaming_tool_error_breaks_stream_loop_immediately(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // This tool errors after receiving its first streamed input chunk.
    thread.update(cx, |thread, _cx| {
        thread.add_tool(StreamingFailingEchoTool {
            receive_chunks_until_failure: 1,
        });
    });

    let _events = thread
        .update(cx, |thread, cx| {
            thread.send(
                UserMessageId::new(),
                ["Use the streaming_failing_echo tool"],
                cx,
            )
        })
        .unwrap();
    cx.run_until_parked();

    // Partial input: is_input_complete is false, so this is a mid-stream
    // chunk rather than a finished tool call.
    let tool_use = LanguageModelToolUse {
        id: "call_1".into(),
        name: StreamingFailingEchoTool::NAME.into(),
        raw_input: "hello".into(),
        input: json!({}),
        is_input_complete: false,
        thought_signature: None,
    };

    fake_model
        .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone()));

    cx.run_until_parked();

    // Without the completion stream ever being ended, a follow-up request
    // should already exist, carrying the failed tool result.
    let completions = fake_model.pending_completions();
    let last_completion = completions.last().unwrap();

    // messages[0] is skipped — presumably the system prompt; TODO confirm.
    assert_eq!(
        last_completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec!["Use the streaming_failing_echo tool".into()],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![language_model::MessageContent::ToolUse(tool_use.clone())],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![language_model::MessageContent::ToolResult(
                    LanguageModelToolResult {
                        tool_use_id: tool_use.id.clone(),
                        tool_name: tool_use.name,
                        is_error: true,
                        content: "failed".into(),
                        output: Some("failed".into()),
                    }
                )],
                cache: true,
                reasoning_details: None,
            },
        ]
    );
}
6521
// Verifies that when one streaming tool fails mid-stream while another tool
// call is still running, the thread waits for the in-flight tool to finish
// before issuing the follow-up request, which then carries both tool results.
#[gpui::test]
async fn test_streaming_tool_error_waits_for_prior_tools_to_complete(cx: &mut TestAppContext) {
    init_test(cx);
    always_allow_tools(cx);

    let ThreadTest { model, thread, .. } = setup(cx, TestModel::Fake).await;
    let fake_model = model.as_fake();

    // The echo tool blocks until this oneshot fires, letting the test control
    // exactly when the first tool call completes.
    let (complete_streaming_echo_tool_call_tx, complete_streaming_echo_tool_call_rx) =
        oneshot::channel();

    thread.update(cx, |thread, _cx| {
        thread.add_tool(
            StreamingEchoTool::new().with_wait_until_complete(complete_streaming_echo_tool_call_rx),
        );
        thread.add_tool(StreamingFailingEchoTool {
            receive_chunks_until_failure: 1,
        });
    });

    let _events = thread
        .update(cx, |thread, cx| {
            thread.send(
                UserMessageId::new(),
                ["Use the streaming_echo tool and the streaming_failing_echo tool"],
                cx,
            )
        })
        .unwrap();
    cx.run_until_parked();

    // First tool call arrives as a partial chunk (is_input_complete: false)...
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        LanguageModelToolUse {
            id: "call_1".into(),
            name: StreamingEchoTool::NAME.into(),
            raw_input: "hello".into(),
            input: json!({ "text": "hello" }),
            is_input_complete: false,
            thought_signature: None,
        },
    ));
    // ...followed by the completed version of the same call (same id).
    let first_tool_use = LanguageModelToolUse {
        id: "call_1".into(),
        name: StreamingEchoTool::NAME.into(),
        raw_input: "hello world".into(),
        input: json!({ "text": "hello world" }),
        is_input_complete: true,
        thought_signature: None,
    };
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        first_tool_use.clone(),
    ));
    // The second call never completes its input; the failing tool errors after
    // one chunk while call_1 is still blocked on the oneshot.
    let second_tool_use = LanguageModelToolUse {
        name: StreamingFailingEchoTool::NAME.into(),
        raw_input: "hello".into(),
        input: json!({ "text": "hello" }),
        is_input_complete: false,
        thought_signature: None,
        id: "call_2".into(),
    };
    fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
        second_tool_use.clone(),
    ));

    cx.run_until_parked();

    // Only now unblock the first tool; the assertions below require that the
    // follow-up request was not sent before this point.
    complete_streaming_echo_tool_call_tx.send(()).unwrap();

    cx.run_until_parked();

    let completions = fake_model.pending_completions();
    let last_completion = completions.last().unwrap();

    // messages[0] is skipped — presumably the system prompt; TODO confirm.
    // Note the failed call_2 result precedes the successful call_1 result.
    assert_eq!(
        last_completion.messages[1..],
        vec![
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![
                    "Use the streaming_echo tool and the streaming_failing_echo tool".into()
                ],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::Assistant,
                content: vec![
                    language_model::MessageContent::ToolUse(first_tool_use.clone()),
                    language_model::MessageContent::ToolUse(second_tool_use.clone())
                ],
                cache: false,
                reasoning_details: None,
            },
            LanguageModelRequestMessage {
                role: Role::User,
                content: vec![
                    language_model::MessageContent::ToolResult(LanguageModelToolResult {
                        tool_use_id: second_tool_use.id.clone(),
                        tool_name: second_tool_use.name,
                        is_error: true,
                        content: "failed".into(),
                        output: Some("failed".into()),
                    }),
                    language_model::MessageContent::ToolResult(LanguageModelToolResult {
                        tool_use_id: first_tool_use.id.clone(),
                        tool_name: first_tool_use.name,
                        is_error: false,
                        content: "hello world".into(),
                        output: Some("hello world".into()),
                    }),
                ],
                cache: true,
                reasoning_details: None,
            },
        ]
    );
}
6639
6640#[gpui::test]
6641async fn test_mid_turn_model_and_settings_refresh(cx: &mut TestAppContext) {
6642 let ThreadTest {
6643 model, thread, fs, ..
6644 } = setup(cx, TestModel::Fake).await;
6645 let fake_model_a = model.as_fake();
6646
6647 thread.update(cx, |thread, _cx| {
6648 thread.add_tool(EchoTool);
6649 thread.add_tool(DelayTool);
6650 });
6651
6652 // Set up two profiles: profile-a has both tools, profile-b has only DelayTool.
6653 fs.insert_file(
6654 paths::settings_file(),
6655 json!({
6656 "agent": {
6657 "profiles": {
6658 "profile-a": {
6659 "name": "Profile A",
6660 "tools": {
6661 EchoTool::NAME: true,
6662 DelayTool::NAME: true,
6663 }
6664 },
6665 "profile-b": {
6666 "name": "Profile B",
6667 "tools": {
6668 DelayTool::NAME: true,
6669 }
6670 }
6671 }
6672 }
6673 })
6674 .to_string()
6675 .into_bytes(),
6676 )
6677 .await;
6678 cx.run_until_parked();
6679
6680 thread.update(cx, |thread, cx| {
6681 thread.set_profile(AgentProfileId("profile-a".into()), cx);
6682 thread.set_thinking_enabled(false, cx);
6683 });
6684
6685 // Send a message — first iteration starts with model A, profile-a, thinking off.
6686 thread
6687 .update(cx, |thread, cx| {
6688 thread.send(UserMessageId::new(), ["test mid-turn refresh"], cx)
6689 })
6690 .unwrap();
6691 cx.run_until_parked();
6692
6693 // Verify first request has both tools and thinking disabled.
6694 let completions = fake_model_a.pending_completions();
6695 assert_eq!(completions.len(), 1);
6696 let first_tools = tool_names_for_completion(&completions[0]);
6697 assert_eq!(first_tools, vec![DelayTool::NAME, EchoTool::NAME]);
6698 assert!(!completions[0].thinking_allowed);
6699
6700 // Model A responds with an echo tool call.
6701 fake_model_a.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(
6702 LanguageModelToolUse {
6703 id: "tool_1".into(),
6704 name: "echo".into(),
6705 raw_input: r#"{"text":"hello"}"#.into(),
6706 input: json!({"text": "hello"}),
6707 is_input_complete: true,
6708 thought_signature: None,
6709 },
6710 ));
6711 fake_model_a.end_last_completion_stream();
6712
6713 // Before the next iteration runs, switch to profile-b (only DelayTool),
6714 // swap in a new model, and enable thinking.
6715 let fake_model_b = Arc::new(FakeLanguageModel::with_id_and_thinking(
6716 "test-provider",
6717 "model-b",
6718 "Model B",
6719 true,
6720 ));
6721 thread.update(cx, |thread, cx| {
6722 thread.set_profile(AgentProfileId("profile-b".into()), cx);
6723 thread.set_model(fake_model_b.clone() as Arc<dyn LanguageModel>, cx);
6724 thread.set_thinking_enabled(true, cx);
6725 });
6726
6727 // Run until parked — processes the echo tool call, loops back, picks up
6728 // the new model/profile/thinking, and makes a second request to model B.
6729 cx.run_until_parked();
6730
6731 // The second request should have gone to model B.
6732 let model_b_completions = fake_model_b.pending_completions();
6733 assert_eq!(
6734 model_b_completions.len(),
6735 1,
6736 "second request should go to model B"
6737 );
6738
6739 // Profile-b only has DelayTool, so echo should be gone.
6740 let second_tools = tool_names_for_completion(&model_b_completions[0]);
6741 assert_eq!(second_tools, vec![DelayTool::NAME]);
6742
6743 // Thinking should now be enabled.
6744 assert!(model_b_completions[0].thinking_allowed);
6745}