copilot_chat.rs

use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anyhow::{Result, anyhow};
use cloud_llm_client::CompletionIntent;
use collections::HashMap;
use copilot::{Copilot, Status};
use copilot_chat::responses as copilot_responses;
use copilot_chat::{
    ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, CopilotChatConfiguration,
    Function, FunctionContent, ImageUrl, Model as CopilotChatModel, ModelVendor,
    Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent, ToolChoice,
};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelId, LanguageModelName, LanguageModelProvider,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelRequest, LanguageModelRequestMessage, LanguageModelToolChoice,
    LanguageModelToolResultContent, LanguageModelToolSchemaFormat, LanguageModelToolUse,
    MessageContent, RateLimiter, Role, StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::prelude::*;
use util::debug_panic;

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

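/// Observable provider state. The stored subscriptions keep this entity
/// notified when the global `CopilotChat` entity or the settings store change.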
pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::Copilot)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's "base model", which is likely a relatively
        // fast model (e.g. 4o) and a sensible choice where premium requests are a concern.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        };

        let Some(copilot) = Copilot::global(cx) else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(format!(
                "Received the following error while signing into Copilot: {err}"
            )),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting. Please wait for Copilot to start, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| {
            copilot_ui::ConfigurationView::new(
                |cx| {
                    CopilotChat::global(cx)
                        .map(|m| m.read(cx).is_authenticated())
                        .unwrap_or(false)
                },
                copilot_ui::ConfigurationMode::Chat,
                cx,
            )
        })
        .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

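/// Flattens the request's messages into tiktoken-rs structs for token counting.
/// Only string contents are considered; tool calls and images are not counted.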
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

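/// A Copilot Chat model exposed as a `LanguageModel`, with requests gated by a `RateLimiter`.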
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all its models,
            // irrespective of the underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
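        // Requests with no intent, or with an intent triggered directly by a user action,
        // count as user-initiated; background intents (tool results, summarization,
        // file creation/edits) do not.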
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

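        // Models that support the Responses API take this path; everything else
        // falls back to the Chat Completions API below.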
        if self.model.supports_response() {
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

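/// Maps a Chat Completions event stream to `LanguageModelCompletionEvent`s.
/// Partial tool calls are accumulated by index across deltas and emitted once the
/// model reports a `tool_calls` finish reason.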
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3)
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| {
                                        // The model can output an empty string
                                        // to indicate the absence of arguments.
                                        // When that happens, create an empty
                                        // object instead.
                                        let arguments = if tool_call.arguments.is_empty() {
                                            Ok(serde_json::Value::Object(Default::default()))
                                        } else {
                                            serde_json::Value::from_str(&tool_call.arguments)
                                        };
                                        match arguments {
                                            Ok(input) => {
                                                Ok(LanguageModelCompletionEvent::ToolUse(
                                                    LanguageModelToolUse {
                                                        id: tool_call.id.into(),
                                                        name: tool_call.name.as_str().into(),
                                                        is_input_complete: true,
                                                        input,
                                                        raw_input: tool_call.arguments,
                                                        thought_signature: tool_call
                                                            .thought_signature,
                                                    },
                                                ))
                                            }
                                            Err(error) => Ok(
                                                LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                    id: tool_call.id.into(),
                                                    tool_name: tool_call.name.as_str().into(),
                                                    raw_input: tool_call.arguments.into(),
                                                    json_parse_error: error.to_string(),
                                                },
                                            ),
                                        }
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

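/// Maps Copilot Responses API stream events to `LanguageModelCompletionEvent`s,
/// tracking whether a tool-use `Stop` was already emitted so that `Completed`
/// does not produce a duplicate stop event.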
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match serde_json::from_str::<serde_json::Value>(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(format!("{error:?}"))),
            )],

            copilot_responses::StreamEvent::Created { .. }
            | copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

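/// Converts a `LanguageModelRequest` into a Copilot Chat Completions request,
/// mapping tool results, text/image parts, tool calls, and reasoning details
/// into the shapes the Chat Completions API expects.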
fn into_copilot_chat(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
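    // Merge runs of consecutive messages that share a role into a single message.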
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at the {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: ToolCallContent::Function {
                                function: FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => ToolChoice::Auto,
            LanguageModelToolChoice::Any => ToolChoice::Any,
            LanguageModelToolChoice::None => ToolChoice::None,
        }),
    })
}

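/// Converts a `LanguageModelRequest` into a Copilot Responses API request,
/// mapping tool results to function-call outputs, preserving redacted thinking
/// as encrypted reasoning items, and asking for encrypted reasoning content back.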
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at the {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // We would need to add support for setting this from user settings.
1110        include: Some(vec![
1111            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
1112        ]),
1113    }
1114}
1115
1116#[cfg(test)]
1117mod tests {
1118    use super::*;
1119    use copilot_chat::responses;
1120    use futures::StreamExt;
1121
1122    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
1123        futures::executor::block_on(async {
1124            CopilotResponsesEventMapper::new()
1125                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1126                .collect::<Vec<_>>()
1127                .await
1128                .into_iter()
1129                .map(Result::unwrap)
1130                .collect()
1131        })
1132    }
1133
1134    #[test]
1135    fn responses_stream_maps_text_and_usage() {
1136        let events = vec![
1137            responses::StreamEvent::OutputItemAdded {
1138                output_index: 0,
1139                sequence_number: None,
1140                item: responses::ResponseOutputItem::Message {
1141                    id: "msg_1".into(),
1142                    role: "assistant".into(),
1143                    content: Some(Vec::new()),
1144                },
1145            },
1146            responses::StreamEvent::OutputTextDelta {
1147                item_id: "msg_1".into(),
1148                output_index: 0,
1149                delta: "Hello".into(),
1150            },
1151            responses::StreamEvent::Completed {
1152                response: responses::Response {
1153                    usage: Some(responses::ResponseUsage {
1154                        input_tokens: Some(5),
1155                        output_tokens: Some(3),
1156                        total_tokens: Some(8),
1157                    }),
1158                    ..Default::default()
1159                },
1160            },
1161        ];
1162
1163        let mapped = map_events(events);
1164        assert!(matches!(
1165            mapped[0],
1166            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
1167        ));
1168        assert!(matches!(
1169            mapped[1],
1170            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
1171        ));
1172        assert!(matches!(
1173            mapped[2],
1174            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1175                input_tokens: 5,
1176                output_tokens: 3,
1177                ..
1178            })
1179        ));
1180        assert!(matches!(
1181            mapped[3],
1182            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
1183        ));
1184    }
1185
1186    #[test]
1187    fn responses_stream_maps_tool_calls() {
1188        let events = vec![responses::StreamEvent::OutputItemDone {
1189            output_index: 0,
1190            sequence_number: None,
1191            item: responses::ResponseOutputItem::FunctionCall {
1192                id: Some("fn_1".into()),
1193                call_id: "call_1".into(),
1194                name: "do_it".into(),
1195                arguments: "{\"x\":1}".into(),
1196                status: None,
1197                thought_signature: None,
1198            },
1199        }];
1200
1201        let mapped = map_events(events);
1202        assert!(matches!(
1203            mapped[0],
1204            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
1205        ));
1206        assert!(matches!(
1207            mapped[1],
1208            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1209        ));
1210    }
1211
1212    #[test]
1213    fn responses_stream_handles_json_parse_error() {
1214        let events = vec![responses::StreamEvent::OutputItemDone {
1215            output_index: 0,
1216            sequence_number: None,
1217            item: responses::ResponseOutputItem::FunctionCall {
1218                id: Some("fn_1".into()),
1219                call_id: "call_1".into(),
1220                name: "do_it".into(),
1221                arguments: "{not json}".into(),
1222                status: None,
1223                thought_signature: None,
1224            },
1225        }];
1226
1227        let mapped = map_events(events);
1228        assert!(matches!(
1229            mapped[0],
1230            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
1231                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
1232        ));
1233        assert!(matches!(
1234            mapped[1],
1235            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1236        ));
1237    }
1238
1239    #[test]
1240    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
1241        let events = vec![responses::StreamEvent::OutputItemDone {
1242            output_index: 0,
1243            sequence_number: None,
1244            item: responses::ResponseOutputItem::Reasoning {
1245                id: "r1".into(),
1246                summary: Some(vec![responses::ResponseReasoningItem {
1247                    kind: "summary_text".into(),
1248                    text: "Chain".into(),
1249                }]),
1250                encrypted_content: Some("ENC".into()),
1251            },
1252        }];
1253
1254        let mapped = map_events(events);
1255        assert!(matches!(
1256            mapped[0],
1257            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
1258        ));
1259        assert!(matches!(
1260            mapped[1],
1261            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
1262        ));
1263    }
1264
1265    #[test]
1266    fn responses_stream_handles_incomplete_max_tokens() {
1267        let events = vec![responses::StreamEvent::Incomplete {
1268            response: responses::Response {
1269                usage: Some(responses::ResponseUsage {
1270                    input_tokens: Some(10),
1271                    output_tokens: Some(0),
1272                    total_tokens: Some(10),
1273                }),
1274                incomplete_details: Some(responses::IncompleteDetails {
1275                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
1276                }),
1277                ..Default::default()
1278            },
1279        }];
1280
1281        let mapped = map_events(events);
1282        assert!(matches!(
1283            mapped[0],
1284            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1285                input_tokens: 10,
1286                output_tokens: 0,
1287                ..
1288            })
1289        ));
1290        assert!(matches!(
1291            mapped[1],
1292            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
1293        ));
1294    }
1295
    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

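    // When a Completed event follows a function call, the ToolUse stop reason
    // already emitted must not be duplicated by the terminal event.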
    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

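    // A Failed event whose error code is a numeric HTTP status should map to
    // an HttpResponseError carrying that status and the server's message.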
    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

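        // Drive the mapper directly rather than through `map_events` so the
        // Err variant of each result can be inspected.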
        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }

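    // Chat Completions streams can carry Gemini-style reasoning fields
    // (`reasoning_opaque` / `reasoning_text`) alongside tool calls; both
    // should be preserved as a ReasoningDetails event next to the ToolUse.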
    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot_chat::{
            FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
        };

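        // First chunk: an assistant delta containing a tool call plus the
        // reasoning fields. Second chunk: the `tool_calls` finish reason.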
        let events = vec![
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(ResponseDelta {
                        content: None,
                        role: Some(Role::Assistant),
                        tool_calls: vec![ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
}