use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anthropic::AnthropicModelMode;
use anyhow::{Result, anyhow};
use collections::HashMap;
use copilot::{GlobalCopilotAuth, Status};
use copilot_chat::responses as copilot_responses;
use copilot_chat::{
    ChatLocation, ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat,
    CopilotChatConfiguration, Function, FunctionContent, ImageUrl, Model as CopilotChatModel,
    ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent,
    ToolChoice,
};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, CompletionIntent, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelCostInfo, LanguageModelEffortLevel, LanguageModelId,
    LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
    LanguageModelToolUse, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::prelude::*;
use util::debug_panic;

use crate::provider::anthropic::{AnthropicEventMapper, into_anthropic};
use language_model::util::{fix_streamed_json, parse_tool_arguments};

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::Copilot)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }
    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model is Copilot Chat's "base model", which is typically a relatively
        // fast model (e.g. 4o) and a sensible choice when premium requests are a concern.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        }

        let Some(copilot) = GlobalCopilotAuth::try_global(cx).cloned() else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.0.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(format!(
                "Received the following error while signing into Copilot: {err}"
            )),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting. Please wait for Copilot to start, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| {
            copilot_ui::ConfigurationView::new(
                |cx| {
                    CopilotChat::global(cx)
                        .map(|m| m.read(cx).is_authenticated())
                        .unwrap_or(false)
                },
                copilot_ui::ConfigurationMode::Chat,
                cx,
            )
        })
        .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

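/// Flattens the request's messages into `tiktoken_rs` chat messages for token
/// counting, reducing each message to its plain string contents.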
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_streaming_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn supports_thinking(&self) -> bool {
        self.model.can_think()
    }

    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
        let levels = self.model.reasoning_effort_levels();
        if levels.is_empty() {
            return vec![];
        }
        levels
            .iter()
            .map(|level| {
                let name = match level.as_str() {
                    "low" => "Low".into(),
                    "medium" => "Medium".into(),
                    "high" => "High".into(),
                    _ => language_model::SharedString::from(level.clone()),
                };
                LanguageModelEffortLevel {
                    name,
                    value: language_model::SharedString::from(level.clone()),
                    is_default: level == "high",
                }
            })
            .collect()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn model_cost_info(&self) -> Option<LanguageModelCostInfo> {
        LanguageModelCostInfo::RequestCost {
            cost_per_request: self.model.multiplier(),
        }
        .into()
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all of its models,
            // irrespective of the underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::Subagent
            | CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

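        // Pick the upstream API by capability: prefer the Anthropic-style
        // Messages API, then the Responses API, then plain Chat Completions.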
        if self.model.supports_messages() {
            let location = intent_to_chat_location(request.intent);
            let model = self.model.clone();
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|e| anthropic::Effort::from_str(e).ok());

                let mut anthropic_request = into_anthropic(
                    request,
                    model.id().to_string(),
                    0.0,
                    model.max_output_tokens() as u64,
                    if model.supports_adaptive_thinking() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: None,
                        }
                    } else if model.can_think() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: compute_thinking_budget(
                                model.min_thinking_budget(),
                                model.max_thinking_budget(),
                                model.max_output_tokens() as u32,
                            ),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );

                anthropic_request.temperature = None;

                // The Copilot proxy doesn't support eager_input_streaming on tools.
                for tool in &mut anthropic_request.tools {
                    tool.eager_input_streaming = false;
                }

                if model.supports_adaptive_thinking() && anthropic_request.thinking.is_some() {
                    anthropic_request.thinking = Some(anthropic::Thinking::Adaptive);
                    anthropic_request.output_config =
                        effort.map(|effort| anthropic::OutputConfig {
                            effort: Some(effort),
                        });
                }

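                // Thinking models without adaptive thinking need the
                // interleaved-thinking beta header to stream thinking blocks.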
                let anthropic_beta = if !model.supports_adaptive_thinking() && model.can_think() {
                    Some("interleaved-thinking-2025-05-14".to_string())
                } else {
                    None
                };

                let body = serde_json::to_string(&anthropic::StreamingRequest {
                    base: anthropic_request,
                    stream: true,
                })
                .map_err(|e| anyhow::anyhow!(e))?;

                let stream = CopilotChat::stream_messages(
                    body,
                    location,
                    is_user_initiated,
                    anthropic_beta,
                    cx.clone(),
                );

                request_limiter
                    .stream(async move {
                        let events = stream.await?;
                        let mapper = AnthropicEventMapper::new();
                        Ok(mapper.map_stream(events).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        if self.model.supports_response() {
            let location = intent_to_chat_location(request.intent);
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request = CopilotChat::stream_response(
                    responses_request,
                    location,
                    is_user_initiated,
                    cx.clone(),
                );
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let location = intent_to_chat_location(request.intent);
        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request = CopilotChat::stream_completion(
                copilot_request,
                location,
                is_user_initiated,
                cx.clone(),
            );
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

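/// Maps Copilot Chat Completions events to `LanguageModelCompletionEvent`s,
/// accumulating streamed tool-call fragments by index and buffering reasoning
/// data until the corresponding finish reason arrives.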
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3)
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }

                            if !entry.id.is_empty() && !entry.name.is_empty() {
                                if let Ok(input) = serde_json::from_str::<serde_json::Value>(
                                    &fix_streamed_json(&entry.arguments),
                                ) {
                                    events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                        LanguageModelToolUse {
                                            id: entry.id.clone().into(),
                                            name: entry.name.as_str().into(),
                                            is_input_complete: false,
                                            input,
                                            raw_input: entry.arguments.clone(),
                                            thought_signature: entry.thought_signature.clone(),
                                        },
                                    )));
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| match parse_tool_arguments(
                                        &tool_call.arguments,
                                    ) {
                                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                            LanguageModelToolUse {
                                                id: tool_call.id.into(),
                                                name: tool_call.name.as_str().into(),
                                                is_input_complete: true,
                                                input,
                                                raw_input: tool_call.arguments,
                                                thought_signature: tool_call.thought_signature,
                                            },
                                        )),
                                        Err(error) => Ok(
                                            LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                id: tool_call.id.into(),
                                                tool_name: tool_call.name.as_str().into(),
                                                raw_input: tool_call.arguments.into(),
                                                json_parse_error: error.to_string(),
                                            },
                                        ),
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

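/// Maps Copilot Responses API stream events to `LanguageModelCompletionEvent`s,
/// remembering when a tool-use stop was already emitted so that `Completed`
/// does not produce a duplicate stop event.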
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match parse_tool_arguments(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(error.message)),
            )],

            copilot_responses::StreamEvent::Created { .. }
            | copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

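/// Converts a `LanguageModelRequest` into Copilot's Chat Completions format,
/// merging consecutive same-role messages, emitting tool results as `Tool`
/// messages, and round-tripping reasoning details on assistant turns.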
fn into_copilot_chat(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let temperature = request.temperature;
    let tool_choice = request.tool_choice;

    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: ToolCallContent::Function {
                                function: FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) => Some(text.as_str()),
                        MessageContent::Thinking { .. }
                        | MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        n: 1,
        stream: model.uses_streaming(),
        temperature: temperature.unwrap_or(0.1),
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => ToolChoice::Auto,
            LanguageModelToolChoice::Any => ToolChoice::Required,
            LanguageModelToolChoice::None => ToolChoice::None,
        }),
        thinking_budget: None,
    })
}

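/// Clamps the fixed 16k thinking budget to the model's advertised
/// `[min_budget, max_budget]` range, never exceeding `max_output_tokens - 1`.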
fn compute_thinking_budget(
    min_budget: Option<u32>,
    max_budget: Option<u32>,
    max_output_tokens: u32,
) -> Option<u32> {
    let configured_budget: u32 = 16000;
    let min_budget = min_budget.unwrap_or(1024);
    let max_budget = max_budget.unwrap_or(max_output_tokens.saturating_sub(1));
    let normalized = configured_budget.max(min_budget);
    Some(
        normalized
            .min(max_budget)
            .min(max_output_tokens.saturating_sub(1)),
    )
}

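/// Maps a completion intent to the `ChatLocation` reported to Copilot:
/// agentic intents go to `Agent`, inline assists to the editor or terminal,
/// and summarization (or no intent) to the panel.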
fn intent_to_chat_location(intent: Option<CompletionIntent>) -> ChatLocation {
    match intent {
        Some(CompletionIntent::UserPrompt) => ChatLocation::Agent,
        Some(CompletionIntent::Subagent) => ChatLocation::Agent,
        Some(CompletionIntent::ToolResults) => ChatLocation::Agent,
        Some(CompletionIntent::ThreadSummarization) => ChatLocation::Panel,
        Some(CompletionIntent::ThreadContextSummarization) => ChatLocation::Panel,
        Some(CompletionIntent::CreateFile) => ChatLocation::Agent,
        Some(CompletionIntent::EditFile) => ChatLocation::Agent,
        Some(CompletionIntent::InlineAssist) => ChatLocation::Editor,
        Some(CompletionIntent::TerminalInlineAssist) => ChatLocation::Terminal,
        Some(CompletionIntent::GenerateGitCommitMessage) => ChatLocation::Other,
        None => ChatLocation::Panel,
    }
}

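/// Converts a `LanguageModelRequest` into a Copilot Responses API request,
/// splitting each message into input items (tool outputs, reasoning blocks,
/// and text or image parts) and asking for encrypted reasoning content back.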
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed,
        thinking_effort: _,
        speed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => {
                                responses::ResponseFunctionOutput::Text(text.to_string())
                            }
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    responses::ResponseFunctionOutput::Content(vec![
                                        responses::ResponseInputContent::InputImage {
                                            image_url: Some(image.to_base64_url()),
                                            detail: Default::default(),
                                        },
                                    ])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    responses::ResponseFunctionOutput::Text(
                                        "[Tool responded with an image, but this model does not support vision]".into(),
                                    )
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Required,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: if thinking_allowed {
            Some(copilot_responses::ReasoningConfig {
                effort: copilot_responses::ReasoningEffort::Medium,
                summary: Some(copilot_responses::ReasoningSummary::Detailed),
            })
        } else {
            None
        },
        include: Some(vec![
            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
        store: false,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot_chat::responses;
    use futures::StreamExt;

    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }

    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot_chat::{
            FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
        };

        let events = vec![
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(ResponseDelta {
                        content: None,
                        role: Some(Role::Assistant),
                        tool_calls: vec![ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
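
    // Sketches exercising the pure helpers above; the expected values follow
    // directly from the clamping and matching logic in this file.
    #[test]
    fn thinking_budget_is_clamped_to_model_limits() {
        // The fixed 16k budget fits within [1024, 32000] and under 64000 output tokens.
        assert_eq!(
            compute_thinking_budget(Some(1024), Some(32000), 64000),
            Some(16000)
        );
        // With no advertised bounds, the budget is capped at max_output_tokens - 1.
        assert_eq!(compute_thinking_budget(None, None, 8192), Some(8191));
        // A large advertised minimum raises the budget to that minimum.
        assert_eq!(
            compute_thinking_budget(Some(20000), Some(32000), 64000),
            Some(20000)
        );
    }

    #[test]
    fn intents_map_to_expected_chat_locations() {
        assert!(matches!(
            intent_to_chat_location(Some(CompletionIntent::UserPrompt)),
            ChatLocation::Agent
        ));
        assert!(matches!(
            intent_to_chat_location(Some(CompletionIntent::InlineAssist)),
            ChatLocation::Editor
        ));
        assert!(matches!(
            intent_to_chat_location(None),
            ChatLocation::Panel
        ));
    }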
}