copilot_chat.rs

use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anyhow::{Result, anyhow};
use cloud_llm_client::CompletionIntent;
use collections::HashMap;
use copilot::{Copilot, Status};
use copilot_chat::responses as copilot_responses;
use copilot_chat::{
    ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, CopilotChatConfiguration,
    Function, FunctionContent, ImageUrl, Model as CopilotChatModel, ModelVendor,
    Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent, ToolChoice,
};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelId, LanguageModelName, LanguageModelProvider,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelRequest, LanguageModelRequestMessage, LanguageModelToolChoice,
    LanguageModelToolResultContent, LanguageModelToolSchemaFormat, LanguageModelToolUse,
    MessageContent, RateLimiter, Role, StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::prelude::*;
use util::debug_panic;

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

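/// Language model provider for GitHub Copilot Chat.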
pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

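/// Provider state. The held subscriptions keep authentication status and
/// configuration in sync with the global [`CopilotChat`] entity and the
/// settings store.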
pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::Copilot)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // Delegate to the default model: Copilot Chat's "base model" is typically a
        // relatively fast model (e.g. GPT-4o) and a sensible choice when premium
        // requests are a concern.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        };

        let Some(copilot) = Copilot::global(cx) else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(format!(
                "Received the following error while signing into Copilot: {err}"
            )),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting. Please wait for it to start, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| {
            copilot_ui::ConfigurationView::new(
                |cx| {
                    CopilotChat::global(cx)
                        .map(|m| m.read(cx).is_authenticated())
                        .unwrap_or(false)
                },
                copilot_ui::ConfigurationMode::Chat,
                cx,
            )
        })
        .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

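/// Flattens the request messages into the shape `tiktoken_rs` expects.
/// Tool calls and images are not represented, so the resulting token count
/// approximates only the text content of the prompt.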
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

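/// A single Copilot Chat model, wrapped with a rate limiter (at most four
/// concurrent requests).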
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses OpenAI's tiktoken tokenizers for all of its models, irrespective of the underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
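        // Decide whether Copilot should treat this request as user-initiated:
        // prompts, inline assists, and commit-message generation qualify,
        // while tool results and automatic file edits do not. Requests with
        // no intent are treated as user-initiated.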
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        if self.model.supports_response() {
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

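/// Maps a raw Copilot Chat Completions event stream to
/// [`LanguageModelCompletionEvent`]s, accumulating streamed tool-call
/// fragments by index and buffering Gemini-style reasoning data until the
/// finish reason arrives.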
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3)
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| {
                                        // The model can output an empty string
                                        // to indicate the absence of arguments.
                                        // When that happens, create an empty
                                        // object instead.
                                        let arguments = if tool_call.arguments.is_empty() {
                                            Ok(serde_json::Value::Object(Default::default()))
                                        } else {
                                            serde_json::Value::from_str(&tool_call.arguments)
                                        };
                                        match arguments {
                                            Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                                LanguageModelToolUse {
                                                    id: tool_call.id.into(),
                                                    name: tool_call.name.as_str().into(),
                                                    is_input_complete: true,
                                                    input,
                                                    raw_input: tool_call.arguments,
                                                    thought_signature: tool_call.thought_signature,
                                                },
                                            )),
                                            Err(error) => Ok(
                                                LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                    id: tool_call.id.into(),
                                                    tool_name: tool_call.name.as_str().into(),
                                                    raw_input: tool_call.arguments.into(),
                                                    json_parse_error: error.to_string(),
                                                },
                                            ),
                                        }
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

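/// Maps Copilot Responses API stream events to
/// [`LanguageModelCompletionEvent`]s. Tracks whether a tool-use stop was
/// already emitted so that a subsequent `Completed` event does not produce a
/// duplicate stop.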
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match serde_json::from_str::<serde_json::Value>(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(format!("{error:?}"))),
            )],

            copilot_responses::StreamEvent::Created { .. }
            | copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

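/// Converts a [`LanguageModelRequest`] into a Copilot Chat Completions
/// request.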
fn into_copilot_chat(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
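    // Coalesce consecutive messages that share a role into a single message
    // before converting to Copilot's wire format.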
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: ToolCallContent::Function {
                                function: FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => ToolChoice::Auto,
            LanguageModelToolChoice::Any => ToolChoice::Any,
            LanguageModelToolChoice::None => ToolChoice::None,
        }),
    })
}

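/// Converts a [`LanguageModelRequest`] into a Copilot Responses API request,
/// carrying over tool calls, tool results, and encrypted reasoning content.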
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // Setting this would require plumbing a value through from user settings.
        include: Some(vec![
            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot_chat::responses;
    use futures::StreamExt;

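    /// Runs the mapper over a fixed sequence of events, unwrapping every
    /// mapped result (panics if any event maps to an error).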
1121    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
1122        futures::executor::block_on(async {
1123            CopilotResponsesEventMapper::new()
1124                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1125                .collect::<Vec<_>>()
1126                .await
1127                .into_iter()
1128                .map(Result::unwrap)
1129                .collect()
1130        })
1131    }
1132
1133    #[test]
1134    fn responses_stream_maps_text_and_usage() {
1135        let events = vec![
1136            responses::StreamEvent::OutputItemAdded {
1137                output_index: 0,
1138                sequence_number: None,
1139                item: responses::ResponseOutputItem::Message {
1140                    id: "msg_1".into(),
1141                    role: "assistant".into(),
1142                    content: Some(Vec::new()),
1143                },
1144            },
1145            responses::StreamEvent::OutputTextDelta {
1146                item_id: "msg_1".into(),
1147                output_index: 0,
1148                delta: "Hello".into(),
1149            },
1150            responses::StreamEvent::Completed {
1151                response: responses::Response {
1152                    usage: Some(responses::ResponseUsage {
1153                        input_tokens: Some(5),
1154                        output_tokens: Some(3),
1155                        total_tokens: Some(8),
1156                    }),
1157                    ..Default::default()
1158                },
1159            },
1160        ];
1161
1162        let mapped = map_events(events);
1163        assert!(matches!(
1164            mapped[0],
1165            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
1166        ));
1167        assert!(matches!(
1168            mapped[1],
1169            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
1170        ));
1171        assert!(matches!(
1172            mapped[2],
1173            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1174                input_tokens: 5,
1175                output_tokens: 3,
1176                ..
1177            })
1178        ));
1179        assert!(matches!(
1180            mapped[3],
1181            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
1182        ));
1183    }
1184
1185    #[test]
1186    fn responses_stream_maps_tool_calls() {
1187        let events = vec![responses::StreamEvent::OutputItemDone {
1188            output_index: 0,
1189            sequence_number: None,
1190            item: responses::ResponseOutputItem::FunctionCall {
1191                id: Some("fn_1".into()),
1192                call_id: "call_1".into(),
1193                name: "do_it".into(),
1194                arguments: "{\"x\":1}".into(),
1195                status: None,
1196                thought_signature: None,
1197            },
1198        }];
1199
1200        let mapped = map_events(events);
1201        assert!(matches!(
1202            mapped[0],
1203            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
1204        ));
1205        assert!(matches!(
1206            mapped[1],
1207            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1208        ));
1209    }
1210
1211    #[test]
1212    fn responses_stream_handles_json_parse_error() {
1213        let events = vec![responses::StreamEvent::OutputItemDone {
1214            output_index: 0,
1215            sequence_number: None,
1216            item: responses::ResponseOutputItem::FunctionCall {
1217                id: Some("fn_1".into()),
1218                call_id: "call_1".into(),
1219                name: "do_it".into(),
1220                arguments: "{not json}".into(),
1221                status: None,
1222                thought_signature: None,
1223            },
1224        }];
1225
1226        let mapped = map_events(events);
1227        assert!(matches!(
1228            mapped[0],
1229            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
1230                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
1231        ));
1232        assert!(matches!(
1233            mapped[1],
1234            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1235        ));
1236    }
1237
1238    #[test]
1239    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
1240        let events = vec![responses::StreamEvent::OutputItemDone {
1241            output_index: 0,
1242            sequence_number: None,
1243            item: responses::ResponseOutputItem::Reasoning {
1244                id: "r1".into(),
1245                summary: Some(vec![responses::ResponseReasoningItem {
1246                    kind: "summary_text".into(),
1247                    text: "Chain".into(),
1248                }]),
1249                encrypted_content: Some("ENC".into()),
1250            },
1251        }];
1252
1253        let mapped = map_events(events);
1254        assert!(matches!(
1255            mapped[0],
1256            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
1257        ));
1258        assert!(matches!(
1259            mapped[1],
1260            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
1261        ));
1262    }
1263
1264    #[test]
1265    fn responses_stream_handles_incomplete_max_tokens() {
1266        let events = vec![responses::StreamEvent::Incomplete {
1267            response: responses::Response {
1268                usage: Some(responses::ResponseUsage {
1269                    input_tokens: Some(10),
1270                    output_tokens: Some(0),
1271                    total_tokens: Some(10),
1272                }),
1273                incomplete_details: Some(responses::IncompleteDetails {
1274                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
1275                }),
1276                ..Default::default()
1277            },
1278        }];
1279
1280        let mapped = map_events(events);
1281        assert!(matches!(
1282            mapped[0],
1283            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1284                input_tokens: 10,
1285                output_tokens: 0,
1286                ..
1287            })
1288        ));
1289        assert!(matches!(
1290            mapped[1],
1291            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
1292        ));
1293    }
1294
    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

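    // A Completed event arriving after a function call must not add a second Stop:
    // the ToolUse stop already emitted for the call should be the only one.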
    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

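    // A Failed response whose error code is an HTTP status ("429" here) should be
    // mapped to LanguageModelCompletionError::HttpResponseError carrying that
    // status and the server-provided message.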
    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }

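    // Chat Completions deltas carrying reasoning_opaque/reasoning_text (as emitted
    // for Gemini 3 reasoning) should be preserved as a ReasoningDetails event
    // alongside the usual ToolUse mapping for the accompanying tool call.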
    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot_chat::{
            FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
        };

        let events = vec![
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(ResponseDelta {
                        content: None,
                        role: Some(Role::Assistant),
                        tool_calls: vec![ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
}