use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anthropic::AnthropicModelMode;
use anyhow::{Result, anyhow};
use cloud_llm_client::CompletionIntent;
use collections::HashMap;
use copilot::{GlobalCopilotAuth, Status};
use copilot_chat::responses as copilot_responses;
use copilot_chat::{
    ChatLocation, ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat,
    CopilotChatConfiguration, Function, FunctionContent, ImageUrl, Model as CopilotChatModel,
    ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent,
    ToolChoice,
};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelCostInfo, LanguageModelEffortLevel, LanguageModelId,
    LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
    LanguageModelToolUse, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::prelude::*;
use util::debug_panic;

use crate::provider::anthropic::{AnthropicEventMapper, into_anthropic};
use crate::provider::util::parse_tool_arguments;

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

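/// Provider state whose observers re-render when the global [`CopilotChat`]
/// entity or relevant settings change. The subscriptions are held only to
/// keep those observers alive.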
pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::Copilot)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's 'base model', which is likely a
        // relatively fast model (e.g. 4o) and a sensible choice when considering
        // premium requests.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        };

        let Some(copilot) = GlobalCopilotAuth::try_global(cx).cloned() else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.0.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(
                "Received the following error while signing into Copilot: {err}"
            ),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting. Please wait for it to finish starting, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| {
            copilot_ui::ConfigurationView::new(
                |cx| {
                    CopilotChat::global(cx)
                        .map(|m| m.read(cx).is_authenticated())
                        .unwrap_or(false)
                },
                copilot_ui::ConfigurationMode::Chat,
                cx,
            )
        })
        .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

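/// Flattens the request messages into the plain-text shape expected by
/// `tiktoken_rs`, so token counts can be estimated locally.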
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

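/// A single Copilot Chat model exposed through the [`LanguageModel`] trait,
/// with a [`RateLimiter`] to bound concurrent requests.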
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_streaming_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn supports_thinking(&self) -> bool {
        self.model.can_think()
    }

    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
        let levels = self.model.reasoning_effort_levels();
        if levels.is_empty() {
            return vec![];
        }
        levels
            .iter()
            .map(|level| {
                let name: SharedString = match level.as_str() {
                    "low" => "Low".into(),
                    "medium" => "Medium".into(),
                    "high" => "High".into(),
                    _ => SharedString::from(level.clone()),
                };
                LanguageModelEffortLevel {
                    name,
                    value: SharedString::from(level.clone()),
                    is_default: level == "high",
                }
            })
            .collect()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn model_cost_info(&self) -> Option<LanguageModelCostInfo> {
        LanguageModelCostInfo::RequestCost {
            cost_per_request: self.model.multiplier(),
        }
        .into()
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all of its models,
            // irrespective of the underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

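    /// Streams a completion by routing to whichever Copilot endpoint the model
    /// supports: the Anthropic-style Messages API, then the OpenAI-style
    /// Responses API, falling back to the Chat Completions API.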
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        if self.model.supports_messages() {
            let location = intent_to_chat_location(request.intent);
            let model = self.model.clone();
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|e| anthropic::Effort::from_str(e).ok());

                let mut anthropic_request = into_anthropic(
                    request,
                    model.id().to_string(),
                    0.0,
                    model.max_output_tokens() as u64,
                    if model.supports_adaptive_thinking() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: None,
                        }
                    } else if model.can_think() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: compute_thinking_budget(
                                model.min_thinking_budget(),
                                model.max_thinking_budget(),
                                model.max_output_tokens() as u32,
                            ),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );

                anthropic_request.temperature = None;

                // The Copilot proxy doesn't support eager_input_streaming on tools.
                for tool in &mut anthropic_request.tools {
                    tool.eager_input_streaming = false;
                }

                if model.supports_adaptive_thinking() && anthropic_request.thinking.is_some() {
                    anthropic_request.thinking = Some(anthropic::Thinking::Adaptive);
                    anthropic_request.output_config = Some(anthropic::OutputConfig { effort });
                }

                let anthropic_beta = if !model.supports_adaptive_thinking() && model.can_think() {
                    Some("interleaved-thinking-2025-05-14".to_string())
                } else {
                    None
                };

                let body = serde_json::to_string(&anthropic::StreamingRequest {
                    base: anthropic_request,
                    stream: true,
                })
                .map_err(|e| anyhow!(e))?;

                let stream = CopilotChat::stream_messages(
                    body,
                    location,
                    is_user_initiated,
                    anthropic_beta,
                    cx.clone(),
                );

                request_limiter
                    .stream(async move {
                        let events = stream.await?;
                        let mapper = AnthropicEventMapper::new();
                        Ok(mapper.map_stream(events).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        if self.model.supports_response() {
            let location = intent_to_chat_location(request.intent);
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request = CopilotChat::stream_response(
                    responses_request,
                    location,
                    is_user_initiated,
                    cx.clone(),
                );
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let location = intent_to_chat_location(request.intent);
        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request = CopilotChat::stream_completion(
                copilot_request,
                location,
                is_user_initiated,
                cx.clone(),
            );
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

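/// Maps Copilot's Chat Completions stream into [`LanguageModelCompletionEvent`]s.
///
/// Tool call fragments are accumulated per index until a `tool_calls` finish
/// reason arrives, and any `reasoning_opaque`/`reasoning_text` data (e.g. from
/// Gemini 3) is buffered so it can be emitted as a single `ReasoningDetails`
/// event.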
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3).
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }

                            if !entry.id.is_empty() && !entry.name.is_empty() {
                                if let Ok(input) = serde_json::from_str::<serde_json::Value>(
                                    &partial_json_fixer::fix_json(&entry.arguments),
                                ) {
                                    events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                        LanguageModelToolUse {
                                            id: entry.id.clone().into(),
                                            name: entry.name.as_str().into(),
                                            is_input_complete: false,
                                            input,
                                            raw_input: entry.arguments.clone(),
                                            thought_signature: entry.thought_signature.clone(),
                                        },
                                    )));
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit it as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| match parse_tool_arguments(
                                        &tool_call.arguments,
                                    ) {
                                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                            LanguageModelToolUse {
                                                id: tool_call.id.into(),
                                                name: tool_call.name.as_str().into(),
                                                is_input_complete: true,
                                                input,
                                                raw_input: tool_call.arguments,
                                                thought_signature: tool_call.thought_signature,
                                            },
                                        )),
                                        Err(error) => Ok(
                                            LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                id: tool_call.id.into(),
                                                tool_name: tool_call.name.as_str().into(),
                                                raw_input: tool_call.arguments.into(),
                                                json_parse_error: error.to_string(),
                                            },
                                        ),
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

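/// Maps Copilot's Responses API stream into [`LanguageModelCompletionEvent`]s,
/// tracking whether a tool-use stop was already emitted so the terminal
/// `Completed` event doesn't produce a duplicate `Stop`.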
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match parse_tool_arguments(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(error.message)),
            )],

            copilot_responses::StreamEvent::Created { .. }
            | copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

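/// Converts a [`LanguageModelRequest`] into the OpenAI-style Chat Completions
/// request used as the fallback path, merging consecutive messages from the
/// same role into a single message first.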
fn into_copilot_chat(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let temperature = request.temperature;
    let tool_choice = request.tool_choice;

    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: ToolCallContent::Function {
                                function: FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) => Some(text.as_str()),
                        MessageContent::Thinking { .. }
                        | MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details.
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        n: 1,
        stream: model.uses_streaming(),
        temperature: temperature.unwrap_or(0.1),
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => ToolChoice::Auto,
            LanguageModelToolChoice::Any => ToolChoice::Any,
            LanguageModelToolChoice::None => ToolChoice::None,
        }),
        thinking_budget: None,
    })
}

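/// Clamps a fixed default thinking budget (16,000 tokens) into the model's
/// advertised `[min_budget, max_budget]` range, never exceeding
/// `max_output_tokens - 1`.
///
/// For example, with `min_budget = Some(1024)`, `max_budget = Some(8192)`, and
/// `max_output_tokens = 64_000`, the result is `Some(8192)`.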
fn compute_thinking_budget(
    min_budget: Option<u32>,
    max_budget: Option<u32>,
    max_output_tokens: u32,
) -> Option<u32> {
    let configured_budget: u32 = 16000;
    let min_budget = min_budget.unwrap_or(1024);
    let max_budget = max_budget.unwrap_or(max_output_tokens.saturating_sub(1));
    let normalized = configured_budget.max(min_budget);
    Some(
        normalized
            .min(max_budget)
            .min(max_output_tokens.saturating_sub(1)),
    )
}

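/// Maps a completion intent to the Copilot chat location reported with each
/// request; requests without an intent default to the panel.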
fn intent_to_chat_location(intent: Option<CompletionIntent>) -> ChatLocation {
    match intent {
        Some(CompletionIntent::UserPrompt) => ChatLocation::Agent,
        Some(CompletionIntent::ToolResults) => ChatLocation::Agent,
        Some(CompletionIntent::ThreadSummarization) => ChatLocation::Panel,
        Some(CompletionIntent::ThreadContextSummarization) => ChatLocation::Panel,
        Some(CompletionIntent::CreateFile) => ChatLocation::Agent,
        Some(CompletionIntent::EditFile) => ChatLocation::Agent,
        Some(CompletionIntent::InlineAssist) => ChatLocation::Editor,
        Some(CompletionIntent::TerminalInlineAssist) => ChatLocation::Terminal,
        Some(CompletionIntent::GenerateGitCommitMessage) => ChatLocation::Other,
        None => ChatLocation::Panel,
    }
}

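/// Converts a [`LanguageModelRequest`] into a Responses API request,
/// translating tool results, tool calls, redacted thinking, and message
/// content into the corresponding input items.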
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed,
        thinking_effort: _,
        speed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: if thinking_allowed {
            Some(copilot_responses::ReasoningConfig {
                effort: copilot_responses::ReasoningEffort::Medium,
                summary: Some(copilot_responses::ReasoningSummary::Detailed),
            })
        } else {
            None
        },
        include: Some(vec![
            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
        store: false,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot_chat::responses;
    use futures::StreamExt;

    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }

    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot_chat::{
            FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
        };

        let events = vec![
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(ResponseDelta {
                        content: None,
                        role: Some(Role::Assistant),
                        tool_calls: vec![ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
}