use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anthropic::AnthropicModelMode;
use anyhow::{Result, anyhow};
use collections::HashMap;
use copilot::{GlobalCopilotAuth, Status};
use copilot_chat::responses as copilot_responses;
use copilot_chat::{
    ChatLocation, ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat,
    CopilotChatConfiguration, Function, FunctionContent, ImageUrl, Model as CopilotChatModel,
    ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent,
    ToolChoice,
};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, CompletionIntent, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelCostInfo, LanguageModelEffortLevel, LanguageModelId,
    LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
    LanguageModelToolUse, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::prelude::*;
use util::debug_panic;

use crate::provider::anthropic::{AnthropicEventMapper, into_anthropic};
use language_model::util::{fix_streamed_json, parse_tool_arguments};

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

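/// Registers GitHub Copilot Chat as a language model provider, exposing
/// whatever models the Copilot service reports.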
pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::Copilot)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's "base model", which is likely a
        // relatively fast model (e.g. 4o) and a sensible choice when premium requests
        // are a concern.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        }

        let Some(copilot) = GlobalCopilotAuth::try_global(cx).cloned() else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.0.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(format!(
                "Received the following error while signing into Copilot: {err}"
            )),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting. Please wait for it to finish starting, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| {
            copilot_ui::ConfigurationView::new(
                |cx| {
                    CopilotChat::global(cx)
                        .map(|m| m.read(cx).is_authenticated())
                        .unwrap_or(false)
                },
                copilot_ui::ConfigurationMode::Chat,
                cx,
            )
        })
        .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

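/// A single Copilot Chat model exposed through the provider, with a
/// per-model request rate limiter.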
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_streaming_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn supports_thinking(&self) -> bool {
        self.model.can_think()
    }

    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
        let levels = self.model.reasoning_effort_levels();
        if levels.is_empty() {
            return vec![];
        }
        levels
            .iter()
            .map(|level| {
                let name = match level.as_str() {
                    "low" => "Low".into(),
                    "medium" => "Medium".into(),
                    "high" => "High".into(),
                    "xhigh" => "Extra High".into(),
                    _ => language_model::SharedString::from(level.clone()),
                };
                LanguageModelEffortLevel {
                    name,
                    value: language_model::SharedString::from(level.clone()),
                    is_default: level == "high",
                }
            })
            .collect()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn model_cost_info(&self) -> Option<LanguageModelCostInfo> {
        LanguageModelCostInfo::RequestCost {
            cost_per_request: self.model.multiplier(),
        }
        .into()
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::Subagent
            | CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

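        // Three request paths follow: the Anthropic Messages API for models
        // that support it, then the OpenAI Responses API, and finally the
        // legacy Chat Completions endpoint.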
        if self.model.supports_messages() {
            let location = intent_to_chat_location(request.intent);
            let model = self.model.clone();
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|e| anthropic::Effort::from_str(e).ok());

                let mut anthropic_request = into_anthropic(
                    request,
                    model.id().to_string(),
                    0.0,
                    model.max_output_tokens() as u64,
                    if model.supports_adaptive_thinking() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: None,
                        }
                    } else if model.supports_thinking() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: compute_thinking_budget(
                                model.min_thinking_budget(),
                                model.max_thinking_budget(),
                                model.max_output_tokens() as u32,
                            ),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );

                anthropic_request.temperature = None;

                // The Copilot proxy doesn't support eager_input_streaming on tools.
                for tool in &mut anthropic_request.tools {
                    tool.eager_input_streaming = false;
                }

                if model.supports_adaptive_thinking() {
                    if anthropic_request.thinking.is_some() {
                        anthropic_request.thinking = Some(anthropic::Thinking::Adaptive);
                        anthropic_request.output_config =
                            effort.map(|effort| anthropic::OutputConfig {
                                effort: Some(effort),
                            });
                    }
                }

                let anthropic_beta =
                    if !model.supports_adaptive_thinking() && model.supports_thinking() {
                        Some("interleaved-thinking-2025-05-14".to_string())
                    } else {
                        None
                    };

                let body = serde_json::to_string(&anthropic::StreamingRequest {
                    base: anthropic_request,
                    stream: true,
                })
                .map_err(|e| anyhow::anyhow!(e))?;

                let stream = CopilotChat::stream_messages(
                    body,
                    location,
                    is_user_initiated,
                    anthropic_beta,
                    cx.clone(),
                );

                request_limiter
                    .stream(async move {
                        let events = stream.await?;
                        let mapper = AnthropicEventMapper::new();
                        Ok(mapper.map_stream(events).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

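        // Models that implement the OpenAI Responses API take this path.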
        if self.model.supports_response() {
            let location = intent_to_chat_location(request.intent);
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request = CopilotChat::stream_response(
                    responses_request,
                    location,
                    is_user_initiated,
                    cx.clone(),
                );
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

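        // Everything else falls back to the legacy Chat Completions endpoint.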
        let location = intent_to_chat_location(request.intent);
        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request = CopilotChat::stream_completion(
                copilot_request,
                location,
                is_user_initiated,
                cx.clone(),
            );
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

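/// Maps a raw Copilot Chat Completions stream into
/// [`LanguageModelCompletionEvent`]s, accumulating streamed tool calls by
/// index and buffering Gemini-style reasoning data until a finish reason
/// arrives.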
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3)
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }

                            if !entry.id.is_empty() && !entry.name.is_empty() {
                                if let Ok(input) = serde_json::from_str::<serde_json::Value>(
                                    &fix_streamed_json(&entry.arguments),
                                ) {
                                    events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                        LanguageModelToolUse {
                                            id: entry.id.clone().into(),
                                            name: entry.name.as_str().into(),
                                            is_input_complete: false,
                                            input,
                                            raw_input: entry.arguments.clone(),
                                            thought_signature: entry.thought_signature.clone(),
                                        },
                                    )));
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| match parse_tool_arguments(
                                        &tool_call.arguments,
                                    ) {
                                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                            LanguageModelToolUse {
                                                id: tool_call.id.into(),
                                                name: tool_call.name.as_str().into(),
                                                is_input_complete: true,
                                                input,
                                                raw_input: tool_call.arguments,
                                                thought_signature: tool_call.thought_signature,
                                            },
                                        )),
                                        Err(error) => Ok(
                                            LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                id: tool_call.id.into(),
                                                tool_name: tool_call.name.as_str().into(),
                                                raw_input: tool_call.arguments.into(),
                                                json_parse_error: error.to_string(),
                                            },
                                        ),
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

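/// Maps Copilot Responses API stream events into
/// [`LanguageModelCompletionEvent`]s, remembering when a tool-use stop has
/// already been emitted so that `Completed` does not produce a duplicate
/// Stop event.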
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match parse_tool_arguments(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(error.message)),
            )],

            copilot_responses::StreamEvent::Created { .. }
            | copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

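/// Converts a [`LanguageModelRequest`] into the legacy Chat Completions
/// request shape, merging consecutive same-role messages and splitting tool
/// results out into dedicated tool messages.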
fn into_copilot_chat(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let temperature = request.temperature;
    let tool_choice = request.tool_choice;
    let thinking_allowed = request.thinking_allowed;

    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: ToolCallContent::Function {
                                function: FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) => Some(text.as_str()),
                        MessageContent::Thinking { .. }
                        | MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        n: 1,
        stream: model.uses_streaming(),
        temperature: temperature.unwrap_or(0.1),
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => ToolChoice::Auto,
            LanguageModelToolChoice::Any => ToolChoice::Required,
            LanguageModelToolChoice::None => ToolChoice::None,
        }),
        thinking_budget: if thinking_allowed && model.supports_thinking() {
            compute_thinking_budget(
                model.min_thinking_budget(),
                model.max_thinking_budget(),
                model.max_output_tokens() as u32,
            )
        } else {
            None
        },
    })
}

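/// Clamps the fixed 16k thinking budget into the model's advertised limits,
/// never exceeding one token below the output window.
///
/// Worked example: with `min_budget = Some(1024)`, `max_budget = Some(32_000)`,
/// and `max_output_tokens = 8192`, the 16k default exceeds the output window
/// and is clamped to `8191`.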
fn compute_thinking_budget(
    min_budget: Option<u32>,
    max_budget: Option<u32>,
    max_output_tokens: u32,
) -> Option<u32> {
    let configured_budget: u32 = 16000;
    let min_budget = min_budget.unwrap_or(1024);
    let max_budget = max_budget.unwrap_or(max_output_tokens.saturating_sub(1));
    let normalized = configured_budget.max(min_budget);
    Some(
        normalized
            .min(max_budget)
            .min(max_output_tokens.saturating_sub(1)),
    )
}

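/// Maps a completion intent to the [`ChatLocation`] reported to Copilot with
/// each request; requests without an intent default to the chat panel.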
fn intent_to_chat_location(intent: Option<CompletionIntent>) -> ChatLocation {
    match intent {
        Some(CompletionIntent::UserPrompt) => ChatLocation::Agent,
        Some(CompletionIntent::Subagent) => ChatLocation::Agent,
        Some(CompletionIntent::ToolResults) => ChatLocation::Agent,
        Some(CompletionIntent::ThreadSummarization) => ChatLocation::Panel,
        Some(CompletionIntent::ThreadContextSummarization) => ChatLocation::Panel,
        Some(CompletionIntent::CreateFile) => ChatLocation::Agent,
        Some(CompletionIntent::EditFile) => ChatLocation::Agent,
        Some(CompletionIntent::InlineAssist) => ChatLocation::Editor,
        Some(CompletionIntent::TerminalInlineAssist) => ChatLocation::Terminal,
        Some(CompletionIntent::GenerateGitCommitMessage) => ChatLocation::Other,
        None => ChatLocation::Panel,
    }
}

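/// Converts a [`LanguageModelRequest`] into a Copilot Responses API request,
/// replaying prior tool calls, redacted thinking blocks, and message content
/// as structured input items.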
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed,
        thinking_effort,
        speed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => {
                                responses::ResponseFunctionOutput::Text(text.to_string())
                            }
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    responses::ResponseFunctionOutput::Content(vec![
                                        responses::ResponseInputContent::InputImage {
                                            image_url: Some(image.to_base64_url()),
                                            detail: Default::default(),
                                        },
                                    ])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    responses::ResponseFunctionOutput::Text(
                                        "[Tool responded with an image, but this model does not support vision]".into(),
                                    )
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Required,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: if thinking_allowed {
            let effort = thinking_effort
                .as_deref()
                .and_then(|e| e.parse::<copilot_responses::ReasoningEffort>().ok())
                .unwrap_or(copilot_responses::ReasoningEffort::Medium);
            Some(copilot_responses::ReasoningConfig {
                effort,
                summary: Some(copilot_responses::ReasoningSummary::Detailed),
            })
        } else {
            None
        },
        include: Some(vec![
            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
        store: false,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot_chat::responses;
    use futures::StreamExt;

    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }

    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot_chat::{
            FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
        };

        let events = vec![
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(ResponseDelta {
                        content: None,
                        role: Some(Role::Assistant),
                        tool_calls: vec![ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
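
    // A small check of the clamping in compute_thinking_budget: the fixed 16k
    // default is raised to the model minimum and capped one token below the
    // output window.
    #[test]
    fn thinking_budget_is_clamped_to_model_limits() {
        // Default budget fits comfortably inside the advertised limits.
        assert_eq!(
            compute_thinking_budget(Some(1024), Some(32_000), 64_000),
            Some(16_000)
        );
        // Output window smaller than the default: clamp to max_output_tokens - 1.
        assert_eq!(
            compute_thinking_budget(Some(1024), Some(32_000), 8_192),
            Some(8_191)
        );
        // Model minimum above the default: raise the budget to the minimum.
        assert_eq!(
            compute_thinking_budget(Some(20_000), Some(32_000), 64_000),
            Some(20_000)
        );
    }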
}