copilot_chat.rs

use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anyhow::{Result, anyhow};
use cloud_llm_client::CompletionIntent;
use collections::HashMap;
use copilot::copilot_chat::{
    ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, ImageUrl,
    Model as CopilotChatModel, ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool,
    ToolCall,
};
use copilot::{Copilot, Status};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolChoice, LanguageModelToolResultContent,
    LanguageModelToolSchemaFormat, LanguageModelToolUse, MessageContent, RateLimiter, Role,
    StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::prelude::*;
use util::debug_panic;

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

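/// Language model provider backed by GitHub Copilot Chat. Models are sourced
/// from the global `CopilotChat` entity once the user is authenticated.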
pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

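/// Holds the subscriptions that keep the provider notified of `CopilotChat`
/// changes and propagate settings (e.g. the enterprise URI) to the chat client.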
pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = copilot::copilot_chat::CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::Copilot
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's 'base model', which is likely a relatively fast
        // model (e.g. 4o) and a sensible choice when considering premium requests
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        };

        let Some(copilot) = Copilot::global(cx) else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(
                "Received the following error while signing into Copilot: {err}"
            ),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting; please wait for it to finish, then try again"
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| {
            copilot::ConfigurationView::new(
                |cx| {
                    CopilotChat::global(cx)
                        .map(|m| m.read(cx).is_authenticated())
                        .unwrap_or(false)
                },
                copilot::ConfigurationMode::Chat,
                cx,
            )
        })
        .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

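/// Flattens the request's messages into tiktoken-rs chat messages so token
/// usage can be estimated locally in `count_tokens`.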
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

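/// A single Copilot Chat model exposed through the provider. The rate limiter
/// bounds concurrent requests (4 at a time).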
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all its models, irrespective of the underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

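    // Routes the request to the Responses API when the model supports it;
    // otherwise falls back to the Chat Completions endpoint below.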
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        if self.model.supports_response() {
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

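/// Converts a raw Copilot Chat completion stream into
/// `LanguageModelCompletionEvent`s. Tool-call fragments are accumulated by
/// index across deltas, and Gemini-style reasoning data is buffered until the
/// finish reason arrives.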
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3)
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| {
                                        // The model can output an empty string
                                        // to indicate the absence of arguments.
                                        // When that happens, create an empty
                                        // object instead.
                                        let arguments = if tool_call.arguments.is_empty() {
                                            Ok(serde_json::Value::Object(Default::default()))
                                        } else {
                                            serde_json::Value::from_str(&tool_call.arguments)
                                        };
                                        match arguments {
                                            Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                                LanguageModelToolUse {
                                                    id: tool_call.id.into(),
                                                    name: tool_call.name.as_str().into(),
                                                    is_input_complete: true,
                                                    input,
                                                    raw_input: tool_call.arguments,
                                                    thought_signature: tool_call.thought_signature,
                                                },
                                            )),
                                            Err(error) => Ok(
                                                LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                    id: tool_call.id.into(),
                                                    tool_name: tool_call.name.as_str().into(),
                                                    raw_input: tool_call.arguments.into(),
                                                    json_parse_error: error.to_string(),
                                                },
                                            ),
                                        }
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

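/// Maps Copilot Responses API stream events into `LanguageModelCompletionEvent`s,
/// tracking whether a tool-use Stop was already emitted so `Completed` does not
/// produce a duplicate. A minimal usage sketch, mirroring the tests below:
///
/// ```ignore
/// let mapped = CopilotResponsesEventMapper::new()
///     .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
///     .collect::<Vec<_>>()
///     .await;
/// ```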
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot::copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot::copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot::copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot::copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot::copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot::copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match serde_json::from_str::<serde_json::Value>(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot::copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot::copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot::copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot::copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot::copilot_responses::IncompleteReason::ContentFilter) => {
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot::copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot::copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!("{error:?}")),
            )],

            copilot::copilot_responses::StreamEvent::Created { .. }
            | copilot::copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

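/// Converts a `LanguageModelRequest` into a Copilot Chat Completions request.
/// Consecutive messages with the same role are merged first, and tool results
/// are emitted as dedicated `ChatMessage::Tool` entries.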
fn into_copilot_chat(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: copilot::copilot_chat::ToolCallContent::Function {
                                function: copilot::copilot_chat::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: copilot::copilot_chat::Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => copilot::copilot_chat::ToolChoice::Auto,
            LanguageModelToolChoice::Any => copilot::copilot_chat::ToolChoice::Any,
            LanguageModelToolChoice::None => copilot::copilot_chat::ToolChoice::None,
        }),
    })
}

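/// Converts a `LanguageModelRequest` into a Copilot Responses API request,
/// lowering tool results, tool calls, redacted thinking, and role-tagged text
/// into discrete input items.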
fn into_copilot_responses(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> copilot::copilot_responses::Request {
    use copilot::copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // We would need to add support for setting from user settings.
        include: Some(vec![
            copilot::copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot::copilot_responses as responses;
    use futures::StreamExt;

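    /// Drives the mapper over a synthetic event stream, unwrapping every
    /// mapped result (panics if any event maps to an error).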
1123    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
1124        futures::executor::block_on(async {
1125            CopilotResponsesEventMapper::new()
1126                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1127                .collect::<Vec<_>>()
1128                .await
1129                .into_iter()
1130                .map(Result::unwrap)
1131                .collect()
1132        })
1133    }
1134
1135    #[test]
1136    fn responses_stream_maps_text_and_usage() {
1137        let events = vec![
1138            responses::StreamEvent::OutputItemAdded {
1139                output_index: 0,
1140                sequence_number: None,
1141                item: responses::ResponseOutputItem::Message {
1142                    id: "msg_1".into(),
1143                    role: "assistant".into(),
1144                    content: Some(Vec::new()),
1145                },
1146            },
1147            responses::StreamEvent::OutputTextDelta {
1148                item_id: "msg_1".into(),
1149                output_index: 0,
1150                delta: "Hello".into(),
1151            },
1152            responses::StreamEvent::Completed {
1153                response: responses::Response {
1154                    usage: Some(responses::ResponseUsage {
1155                        input_tokens: Some(5),
1156                        output_tokens: Some(3),
1157                        total_tokens: Some(8),
1158                    }),
1159                    ..Default::default()
1160                },
1161            },
1162        ];
1163
1164        let mapped = map_events(events);
1165        assert!(matches!(
1166            mapped[0],
1167            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
1168        ));
1169        assert!(matches!(
1170            mapped[1],
1171            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
1172        ));
1173        assert!(matches!(
1174            mapped[2],
1175            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1176                input_tokens: 5,
1177                output_tokens: 3,
1178                ..
1179            })
1180        ));
1181        assert!(matches!(
1182            mapped[3],
1183            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
1184        ));
1185    }
1186
1187    #[test]
1188    fn responses_stream_maps_tool_calls() {
1189        let events = vec![responses::StreamEvent::OutputItemDone {
1190            output_index: 0,
1191            sequence_number: None,
1192            item: responses::ResponseOutputItem::FunctionCall {
1193                id: Some("fn_1".into()),
1194                call_id: "call_1".into(),
1195                name: "do_it".into(),
1196                arguments: "{\"x\":1}".into(),
1197                status: None,
1198                thought_signature: None,
1199            },
1200        }];
1201
1202        let mapped = map_events(events);
1203        assert!(matches!(
1204            mapped[0],
1205            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
1206        ));
1207        assert!(matches!(
1208            mapped[1],
1209            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1210        ));
1211    }
1212
1213    #[test]
1214    fn responses_stream_handles_json_parse_error() {
1215        let events = vec![responses::StreamEvent::OutputItemDone {
1216            output_index: 0,
1217            sequence_number: None,
1218            item: responses::ResponseOutputItem::FunctionCall {
1219                id: Some("fn_1".into()),
1220                call_id: "call_1".into(),
1221                name: "do_it".into(),
1222                arguments: "{not json}".into(),
1223                status: None,
1224                thought_signature: None,
1225            },
1226        }];
1227
1228        let mapped = map_events(events);
1229        assert!(matches!(
1230            mapped[0],
1231            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
1232                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
1233        ));
1234        assert!(matches!(
1235            mapped[1],
1236            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1237        ));
1238    }
1239
1240    #[test]
1241    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
1242        let events = vec![responses::StreamEvent::OutputItemDone {
1243            output_index: 0,
1244            sequence_number: None,
1245            item: responses::ResponseOutputItem::Reasoning {
1246                id: "r1".into(),
1247                summary: Some(vec![responses::ResponseReasoningItem {
1248                    kind: "summary_text".into(),
1249                    text: "Chain".into(),
1250                }]),
1251                encrypted_content: Some("ENC".into()),
1252            },
1253        }];
1254
1255        let mapped = map_events(events);
1256        assert!(matches!(
1257            mapped[0],
1258            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
1259        ));
1260        assert!(matches!(
1261            mapped[1],
1262            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
1263        ));
1264    }
1265
    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

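    // Content filtering carries no usage payload here; the stream should
    // still terminate with StopReason::Refusal.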
    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

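    // A tool call already stops the turn with StopReason::ToolUse; the
    // trailing Completed event must not emit a second Stop.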
    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

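    // A Failed event whose error code is an HTTP status string should surface
    // as LanguageModelCompletionError::HttpResponseError, not as an event.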
    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

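        // Collect the raw Results directly instead of going through the
        // map_events helper, so the Err variant stays observable.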
        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }

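    // The Chat Completions path: reasoning_opaque/reasoning_text deltas (as
    // emitted for Gemini 3 reasoning) should map to a ReasoningDetails event
    // alongside the accumulated tool call.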
    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot::copilot_chat::ResponseEvent;

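        // Two streamed chunks: the first carries a tool-call delta plus the
        // reasoning fields; the second closes the turn with finish_reason
        // "tool_calls".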
        let events = vec![
            ResponseEvent {
                choices: vec![copilot::copilot_chat::ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(copilot::copilot_chat::ResponseDelta {
                        content: None,
                        role: Some(copilot::copilot_chat::Role::Assistant),
                        tool_calls: vec![copilot::copilot_chat::ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(copilot::copilot_chat::FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![copilot::copilot_chat::ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(copilot::copilot_chat::ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
}