use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anthropic::AnthropicModelMode;
use anyhow::{Result, anyhow};
use collections::HashMap;
use copilot::{GlobalCopilotAuth, Status};
use copilot_chat::responses as copilot_responses;
use copilot_chat::{
    ChatLocation, ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat,
    CopilotChatConfiguration, Function, FunctionContent, ImageUrl, Model as CopilotChatModel,
    ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent,
    ToolChoice,
};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::util::{fix_streamed_json, parse_tool_arguments};
use language_model::{
    AuthenticateError, CompletionIntent, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelCostInfo, LanguageModelEffortLevel, LanguageModelId,
    LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
    LanguageModelToolUse, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::prelude::*;
use util::debug_panic;

use crate::provider::anthropic::{AnthropicEventMapper, into_anthropic};

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::Copilot)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's "base model", which is likely a relatively
        // fast model (e.g. GPT-4o) and a sensible choice when considering premium requests.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        }

        let Some(copilot) = GlobalCopilotAuth::try_global(cx).cloned() else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.0.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(format!(
                "Received the following error while signing into Copilot: {err}"
            )),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting. Please wait for Copilot to start, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| {
            copilot_ui::ConfigurationView::new(
                |cx| {
                    CopilotChat::global(cx)
                        .map(|m| m.read(cx).is_authenticated())
                        .unwrap_or(false)
                },
                copilot_ui::ConfigurationMode::Chat,
                cx,
            )
        })
        .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

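/// Flattens a [`LanguageModelRequest`] into the message shape `tiktoken_rs`
/// expects for token counting. Only the role and stringified contents are
/// kept; `name` and `function_call` are left unset, so the result is an
/// estimate rather than an exact accounting of tool traffic.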
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

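/// A single Copilot Chat model exposed through the provider. All requests are
/// funneled through a small [`RateLimiter`] (four concurrent requests).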
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_streaming_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn supports_thinking(&self) -> bool {
        self.model.can_think()
    }

    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
        let levels = self.model.reasoning_effort_levels();
        if levels.is_empty() {
            return vec![];
        }
        levels
            .iter()
            .map(|level| {
                let name = match level.as_str() {
                    "low" => "Low".into(),
                    "medium" => "Medium".into(),
                    "high" => "High".into(),
                    "xhigh" => "Extra High".into(),
                    _ => language_model::SharedString::from(level.clone()),
                };
                LanguageModelEffortLevel {
                    name,
                    value: language_model::SharedString::from(level.clone()),
                    is_default: level == "high",
                }
            })
            .collect()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn model_cost_info(&self) -> Option<LanguageModelCostInfo> {
        LanguageModelCostInfo::RequestCost {
            cost_per_request: self.model.multiplier(),
        }
        .into()
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses OpenAI's tiktoken tokenizer for all of its models, irrespective of
            // the underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

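    // Dispatches to one of three Copilot backends, in order of preference: the
    // Anthropic Messages passthrough (`stream_messages`), the Responses API
    // (`stream_response`), and finally classic Chat Completions
    // (`stream_completion`), each mapped back to LanguageModelCompletionEvents.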
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::Subagent
            | CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        if self.model.supports_messages() {
            let location = intent_to_chat_location(request.intent);
            let model = self.model.clone();
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|e| anthropic::Effort::from_str(e).ok());

                let mut anthropic_request = into_anthropic(
                    request,
                    model.id().to_string(),
                    0.0,
                    model.max_output_tokens() as u64,
                    if model.supports_adaptive_thinking() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: None,
                        }
                    } else if model.supports_thinking() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: compute_thinking_budget(
                                model.min_thinking_budget(),
                                model.max_thinking_budget(),
                                model.max_output_tokens() as u32,
                            ),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );

                anthropic_request.temperature = None;

                // The Copilot proxy doesn't support `eager_input_streaming` on tools.
                for tool in &mut anthropic_request.tools {
                    tool.eager_input_streaming = false;
                }

                if model.supports_adaptive_thinking() && anthropic_request.thinking.is_some() {
                    anthropic_request.thinking = Some(anthropic::Thinking::Adaptive);
                    anthropic_request.output_config =
                        effort.map(|effort| anthropic::OutputConfig {
                            effort: Some(effort),
                        });
                }

                let anthropic_beta =
                    if !model.supports_adaptive_thinking() && model.supports_thinking() {
                        Some("interleaved-thinking-2025-05-14".to_string())
                    } else {
                        None
                    };

                let body = serde_json::to_string(&anthropic::StreamingRequest {
                    base: anthropic_request,
                    stream: true,
                })
                .map_err(|e| anyhow::anyhow!(e))?;

                let stream = CopilotChat::stream_messages(
                    body,
                    location,
                    is_user_initiated,
                    anthropic_beta,
                    cx.clone(),
                );

                request_limiter
                    .stream(async move {
                        let events = stream.await?;
                        let mapper = AnthropicEventMapper::new();
                        Ok(mapper.map_stream(events).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        if self.model.supports_response() {
            let location = intent_to_chat_location(request.intent);
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request = CopilotChat::stream_response(
                    responses_request,
                    location,
                    is_user_initiated,
                    cx.clone(),
                );
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let location = intent_to_chat_location(request.intent);
        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request = CopilotChat::stream_completion(
                copilot_request,
                location,
                is_user_initiated,
                cx.clone(),
            );
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

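/// Maps a Copilot Chat Completions event stream into
/// [`LanguageModelCompletionEvent`]s. For streaming responses the per-chunk
/// `delta` is read; for non-streaming responses the full `message` is read
/// instead. Tool calls arrive as fragments keyed by index, so they are
/// accumulated in `tool_calls_by_index` and emitted incrementally (with
/// `fix_streamed_json` repairing the partial JSON) until a `tool_calls` finish
/// reason flushes them as complete. Gemini 3 style
/// `reasoning_opaque`/`reasoning_text` fields are buffered and re-emitted as a
/// `ReasoningDetails` event so the agent can send them back on the next turn.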
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3).
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }

                            if !entry.id.is_empty() && !entry.name.is_empty() {
                                if let Ok(input) = serde_json::from_str::<serde_json::Value>(
                                    &fix_streamed_json(&entry.arguments),
                                ) {
                                    events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                        LanguageModelToolUse {
                                            id: entry.id.clone().into(),
                                            name: entry.name.as_str().into(),
                                            is_input_complete: false,
                                            input,
                                            raw_input: entry.arguments.clone(),
                                            thought_signature: entry.thought_signature.clone(),
                                        },
                                    )));
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| match parse_tool_arguments(
                                        &tool_call.arguments,
                                    ) {
                                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                            LanguageModelToolUse {
                                                id: tool_call.id.into(),
                                                name: tool_call.name.as_str().into(),
                                                is_input_complete: true,
                                                input,
                                                raw_input: tool_call.arguments,
                                                thought_signature: tool_call.thought_signature,
                                            },
                                        )),
                                        Err(error) => Ok(
                                            LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                id: tool_call.id.into(),
                                                tool_name: tool_call.name.as_str().into(),
                                                raw_input: tool_call.arguments.into(),
                                                json_parse_error: error.to_string(),
                                            },
                                        ),
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

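/// Maps Copilot Responses API stream events into
/// [`LanguageModelCompletionEvent`]s. `pending_stop_reason` records that a
/// `Stop(ToolUse)` was already emitted for a completed function call so the
/// terminal `Completed` event does not emit a duplicate `Stop(EndTurn)`.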
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match parse_tool_arguments(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(error.message)),
            )],

            copilot_responses::StreamEvent::Created { .. }
            | copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

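/// Converts a [`LanguageModelRequest`] into the Copilot Chat Completions wire
/// format. Consecutive messages with the same role are merged first; tool
/// results become `ChatMessage::Tool` entries, assistant tool uses become
/// `ToolCall`s, and any stored reasoning details are replayed via the
/// `reasoning_opaque`/`reasoning_text` fields on the assistant message.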
fn into_copilot_chat(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let temperature = request.temperature;
    let tool_choice = request.tool_choice;
    let thinking_allowed = request.thinking_allowed;

    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: ToolCallContent::Function {
                                function: FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) => Some(text.as_str()),
                        MessageContent::Thinking { .. }
                        | MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details.
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        n: 1,
        stream: model.uses_streaming(),
        temperature: temperature.unwrap_or(0.1),
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => ToolChoice::Auto,
            LanguageModelToolChoice::Any => ToolChoice::Required,
            LanguageModelToolChoice::None => ToolChoice::None,
        }),
        thinking_budget: if thinking_allowed && model.supports_thinking() {
            compute_thinking_budget(
                model.min_thinking_budget(),
                model.max_thinking_budget(),
                model.max_output_tokens() as u32,
            )
        } else {
            None
        },
    })
}

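/// Clamps a fixed 16k-token thinking budget into the window the model allows:
/// the configured budget is raised to at least `min_budget` (default 1024),
/// then capped at both `max_budget` and `max_output_tokens - 1`. For example,
/// with no model-provided bounds and `max_output_tokens = 8192` this returns
/// `Some(8191)`; with `max_budget = Some(4096)` it returns `Some(4096)`.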
fn compute_thinking_budget(
    min_budget: Option<u32>,
    max_budget: Option<u32>,
    max_output_tokens: u32,
) -> Option<u32> {
    let configured_budget: u32 = 16000;
    let min_budget = min_budget.unwrap_or(1024);
    let max_budget = max_budget.unwrap_or(max_output_tokens.saturating_sub(1));
    let normalized = configured_budget.max(min_budget);
    Some(
        normalized
            .min(max_budget)
            .min(max_output_tokens.saturating_sub(1)),
    )
}

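/// Maps the completion intent to the `ChatLocation` sent with each Copilot
/// request: agentic intents report `Agent`, summarization reports `Panel`,
/// and the inline assists report their editor surface.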
fn intent_to_chat_location(intent: Option<CompletionIntent>) -> ChatLocation {
    match intent {
        Some(CompletionIntent::UserPrompt) => ChatLocation::Agent,
        Some(CompletionIntent::Subagent) => ChatLocation::Agent,
        Some(CompletionIntent::ToolResults) => ChatLocation::Agent,
        Some(CompletionIntent::ThreadSummarization) => ChatLocation::Panel,
        Some(CompletionIntent::ThreadContextSummarization) => ChatLocation::Panel,
        Some(CompletionIntent::CreateFile) => ChatLocation::Agent,
        Some(CompletionIntent::EditFile) => ChatLocation::Agent,
        Some(CompletionIntent::InlineAssist) => ChatLocation::Editor,
        Some(CompletionIntent::TerminalInlineAssist) => ChatLocation::Terminal,
        Some(CompletionIntent::GenerateGitCommitMessage) => ChatLocation::Other,
        None => ChatLocation::Panel,
    }
}

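/// Converts a [`LanguageModelRequest`] into a Copilot Responses API request.
/// Tool results become `FunctionCallOutput` items, assistant tool uses become
/// `FunctionCall` items, and redacted thinking is replayed as encrypted
/// reasoning content; `ReasoningEncryptedContent` is requested back (with
/// `store: false`) so reasoning can round-trip across turns.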
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed,
        thinking_effort,
        speed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => {
                                responses::ResponseFunctionOutput::Text(text.to_string())
                            }
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    responses::ResponseFunctionOutput::Content(vec![
                                        responses::ResponseInputContent::InputImage {
                                            image_url: Some(image.to_base64_url()),
                                            detail: Default::default(),
                                        },
                                    ])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    responses::ResponseFunctionOutput::Text(
                                        "[Tool responded with an image, but this model does not support vision]".into(),
                                    )
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Required,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: if thinking_allowed {
            let effort = thinking_effort
                .as_deref()
                .and_then(|e| e.parse::<copilot_responses::ReasoningEffort>().ok())
                .unwrap_or(copilot_responses::ReasoningEffort::Medium);
            Some(copilot_responses::ReasoningConfig {
                effort,
                summary: Some(copilot_responses::ReasoningSummary::Detailed),
            })
        } else {
            None
        },
        include: Some(vec![
            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
        store: false,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot_chat::responses;
    use futures::StreamExt;

    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }

    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot_chat::{
            FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
        };

        let events = vec![
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(ResponseDelta {
                        content: None,
                        role: Some(Role::Assistant),
                        tool_calls: vec![ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
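
    // `compute_thinking_budget` clamps the fixed 16k request into the window
    // the model allows; the expected values below follow directly from its
    // arithmetic.
    #[test]
    fn thinking_budget_is_clamped_to_model_limits() {
        // No model-provided bounds: capped at max_output_tokens - 1.
        assert_eq!(compute_thinking_budget(None, None, 8192), Some(8191));
        // A model maximum below 16k wins over the configured budget.
        assert_eq!(
            compute_thinking_budget(Some(2048), Some(4096), 65536),
            Some(4096)
        );
        // A model minimum above 16k raises the budget.
        assert_eq!(
            compute_thinking_budget(Some(32_000), None, 100_000),
            Some(32_000)
        );
    }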
}