copilot_chat.rs

   1use std::pin::Pin;
   2use std::str::FromStr as _;
   3use std::sync::Arc;
   4
   5use anthropic::AnthropicModelMode;
   6use anyhow::{Result, anyhow};
   7use collections::HashMap;
   8use copilot::{GlobalCopilotAuth, Status};
   9use copilot_chat::responses as copilot_responses;
  10use copilot_chat::{
  11    ChatLocation, ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat,
  12    CopilotChatConfiguration, Function, FunctionContent, ImageUrl, Model as CopilotChatModel,
  13    ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent,
  14    ToolChoice,
  15};
  16use futures::future::BoxFuture;
  17use futures::stream::BoxStream;
  18use futures::{FutureExt, Stream, StreamExt};
  19use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
  20use http_client::StatusCode;
  21use language::language_settings::all_language_settings;
  22use language_model::{
  23    AuthenticateError, CompletionIntent, IconOrSvg, LanguageModel, LanguageModelCompletionError,
  24    LanguageModelCompletionEvent, LanguageModelCostInfo, LanguageModelEffortLevel, LanguageModelId,
  25    LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
  26    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
  27    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
  28    LanguageModelToolUse, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
  29};
  30use settings::SettingsStore;
  31use ui::prelude::*;
  32use util::debug_panic;
  33
  34use crate::provider::anthropic::{AnthropicEventMapper, into_anthropic};
  35use crate::provider::util::{fix_streamed_json, parse_tool_arguments};
  36
  37const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
  38const PROVIDER_NAME: LanguageModelProviderName =
  39    LanguageModelProviderName::new("GitHub Copilot Chat");
  40
/// Language-model provider backed by GitHub Copilot Chat.
///
/// Holds the observable [`State`] entity that tracks authentication and
/// settings changes; models themselves are fetched from the global
/// `CopilotChat` entity on demand.
pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}
  44
/// Observable provider state. The fields only exist to keep the GPUI
/// subscriptions alive for the lifetime of the provider; dropping them
/// would silently stop change notifications.
pub struct State {
    // Re-notifies observers whenever the global `CopilotChat` entity changes.
    // `None` when no global `CopilotChat` exists at construction time.
    _copilot_chat_subscription: Option<Subscription>,
    // Pushes settings changes (e.g. enterprise URI) into `CopilotChat`.
    _settings_subscription: Subscription,
}
  49
  50impl State {
  51    fn is_authenticated(&self, cx: &App) -> bool {
  52        CopilotChat::global(cx)
  53            .map(|m| m.read(cx).is_authenticated())
  54            .unwrap_or(false)
  55    }
  56}
  57
impl CopilotChatLanguageModelProvider {
    /// Builds the provider and wires up its two subscriptions:
    /// one observing the global `CopilotChat` entity (if present), and one
    /// observing the settings store to forward configuration changes.
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            // If a global CopilotChat exists now, re-notify our observers
            // whenever it changes; otherwise no chat subscription is made.
            // NOTE(review): a CopilotChat created *after* this point would not
            // be observed here — presumably that cannot happen; confirm.
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    // Propagate the (possibly updated) enterprise URI from the
                    // Copilot edit-prediction settings into the chat client.
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    // Notify even when no CopilotChat exists, so dependent UI
                    // re-evaluates on any settings change.
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    /// Wraps a Copilot Chat model in the `LanguageModel` adapter, with a
    /// per-model rate limiter of 4 concurrent requests.
    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}
  94
  95impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
  96    type ObservableEntity = State;
  97
  98    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
  99        Some(self.state.clone())
 100    }
 101}
 102
impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::Copilot)
    }

    /// The default model is simply the first model the Copilot Chat service
    /// reports; `None` when the global entity or its model list is absent.
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's 'base model', which is likely a relatively fast
        // model (e.g. 4o) and a sensible choice when considering premium requests
        self.default_model(cx)
    }

    /// All models currently advertised by the Copilot Chat service, each
    /// wrapped in the `LanguageModel` adapter. Empty when unavailable.
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    /// Copilot Chat cannot initiate its own sign-in; this only inspects the
    /// current Copilot auth status and reports an appropriate error when the
    /// user is not (yet) authorized.
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        };

        // Chat rides on the main Copilot integration; without it there is
        // nothing to authenticate against.
        let Some(copilot) = GlobalCopilotAuth::try_global(cx).cloned() else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        // Map every non-authorized Copilot status to a user-facing error.
        let err = match copilot.0.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(format!(
                "Received the following error while signing into Copilot: {err}"
            )),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting, please wait for Copilot to start then try again"
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    /// Builds the shared Copilot configuration view in chat mode; the closure
    /// lets the view poll authentication status on each render.
    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| {
            copilot_ui::ConfigurationView::new(
                |cx| {
                    CopilotChat::global(cx)
                        .map(|m| m.read(cx).is_authenticated())
                        .unwrap_or(false)
                },
                copilot_ui::ConfigurationMode::Chat,
                cx,
            )
        })
        .into()
    }

    /// Sign-out is handled by the main Copilot integration, not here.
    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}
 205
 206fn collect_tiktoken_messages(
 207    request: LanguageModelRequest,
 208) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
 209    request
 210        .messages
 211        .into_iter()
 212        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
 213            role: match message.role {
 214                Role::User => "user".into(),
 215                Role::Assistant => "assistant".into(),
 216                Role::System => "system".into(),
 217            },
 218            content: Some(message.string_contents()),
 219            name: None,
 220            function_call: None,
 221        })
 222        .collect::<Vec<_>>()
 223}
 224
/// A single Copilot Chat model exposed through the `LanguageModel` trait.
pub struct CopilotChatLanguageModel {
    // Model metadata/capabilities as reported by the Copilot Chat service.
    model: CopilotChatModel,
    // Limits concurrent in-flight requests for this model (see `RateLimiter::new(4)`).
    request_limiter: RateLimiter,
}
 229
impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_streaming_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn supports_thinking(&self) -> bool {
        self.model.can_think()
    }

    /// Maps the model's raw effort-level strings to display entries.
    /// Known levels get capitalized names; unknown ones pass through verbatim.
    /// "high" is marked as the default level.
    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
        let levels = self.model.reasoning_effort_levels();
        if levels.is_empty() {
            return vec![];
        }
        levels
            .iter()
            .map(|level| {
                let name: SharedString = match level.as_str() {
                    "low" => "Low".into(),
                    "medium" => "Medium".into(),
                    "high" => "High".into(),
                    _ => SharedString::from(level.clone()),
                };
                LanguageModelEffortLevel {
                    name,
                    value: SharedString::from(level.clone()),
                    is_default: level == "high",
                }
            })
            .collect()
    }

    /// OpenAI/Anthropic-backed models accept full JSON Schema for tool inputs;
    /// other vendors only handle a subset.
    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    /// Copilot Chat bills per request via a model-specific multiplier rather
    /// than per token.
    fn model_cost_info(&self) -> Option<LanguageModelCostInfo> {
        LanguageModelCostInfo::RequestCost {
            cost_per_request: self.model.multiplier(),
        }
        .into()
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    /// Estimates token usage with tiktoken on a background task.
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses an OpenAI tiktoken tokenizer for all its models,
            // irrespective of the underlying provider (vendor). Map the
            // tokenizer id to a model name tiktoken_rs understands.
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    /// Dispatches the request to one of three Copilot endpoints depending on
    /// model capability, preferred in this order:
    /// 1. Anthropic-style Messages API (`supports_messages`)
    /// 2. OpenAI-style Responses API (`supports_response`)
    /// 3. Legacy chat-completions API (fallback)
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        // Copilot distinguishes user-initiated requests (which may consume
        // premium quota) from automated follow-ups. A missing intent is
        // treated as user-initiated.
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::Subagent
            | CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        // Path 1: Anthropic Messages API.
        if self.model.supports_messages() {
            let location = intent_to_chat_location(request.intent);
            let model = self.model.clone();
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|e| anthropic::Effort::from_str(e).ok());

                // Select the thinking mode: adaptive models defer the budget,
                // other thinking-capable models get an explicit token budget.
                let mut anthropic_request = into_anthropic(
                    request,
                    model.id().to_string(),
                    0.0,
                    model.max_output_tokens() as u64,
                    if model.supports_adaptive_thinking() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: None,
                        }
                    } else if model.can_think() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: compute_thinking_budget(
                                model.min_thinking_budget(),
                                model.max_thinking_budget(),
                                model.max_output_tokens() as u32,
                            ),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );

                // NOTE(review): temperature is deliberately stripped after
                // conversion — presumably the Copilot proxy rejects it; confirm.
                anthropic_request.temperature = None;

                // The Copilot proxy doesn't support eager_input_streaming on tools.
                for tool in &mut anthropic_request.tools {
                    tool.eager_input_streaming = false;
                }

                if model.supports_adaptive_thinking() {
                    if anthropic_request.thinking.is_some() {
                        anthropic_request.thinking = Some(anthropic::Thinking::Adaptive);
                        anthropic_request.output_config = Some(anthropic::OutputConfig { effort });
                    }
                }

                // Interleaved thinking requires an explicit beta header for
                // non-adaptive thinking models.
                let anthropic_beta = if !model.supports_adaptive_thinking() && model.can_think() {
                    Some("interleaved-thinking-2025-05-14".to_string())
                } else {
                    None
                };

                let body = serde_json::to_string(&anthropic::StreamingRequest {
                    base: anthropic_request,
                    stream: true,
                })
                .map_err(|e| anyhow::anyhow!(e))?;

                let stream = CopilotChat::stream_messages(
                    body,
                    location,
                    is_user_initiated,
                    anthropic_beta,
                    cx.clone(),
                );

                request_limiter
                    .stream(async move {
                        let events = stream.await?;
                        let mapper = AnthropicEventMapper::new();
                        Ok(mapper.map_stream(events).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        // Path 2: OpenAI Responses API.
        if self.model.supports_response() {
            let location = intent_to_chat_location(request.intent);
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request = CopilotChat::stream_response(
                    responses_request,
                    location,
                    is_user_initiated,
                    cx.clone(),
                );
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        // Path 3: legacy chat-completions API.
        let location = intent_to_chat_location(request.intent);
        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request = CopilotChat::stream_completion(
                copilot_request,
                location,
                is_user_initiated,
                cx.clone(),
            );
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}
 493
/// Adapts a raw Copilot chat-completions event stream into
/// `LanguageModelCompletionEvent`s.
///
/// `is_streaming` selects whether each choice's payload comes from `delta`
/// (SSE streaming) or `message` (single non-streamed response). Tool-call
/// fragments are accumulated per index across events; reasoning data
/// (Gemini 3) is buffered and emitted once on the `tool_calls` finish reason.
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    // Accumulator for one tool call; fields arrive piecemeal across deltas.
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    // Per-stream state threaded through `unfold`.
    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        // Streaming responses carry incremental deltas;
                        // non-streaming ones carry the full message.
                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3)
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        // Merge tool-call fragments into the per-index
                        // accumulators; a missing index falls back to the
                        // positional index within this delta.
                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }

                            // Emit a provisional (is_input_complete: false)
                            // ToolUse whenever the accumulated arguments parse
                            // as JSON after repair, so the UI can stream input.
                            if !entry.id.is_empty() && !entry.name.is_empty() {
                                if let Ok(input) = serde_json::from_str::<serde_json::Value>(
                                    &fix_streamed_json(&entry.arguments),
                                ) {
                                    events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                        LanguageModelToolUse {
                                            id: entry.id.clone().into(),
                                            name: entry.name.as_str().into(),
                                            is_input_complete: false,
                                            input,
                                            raw_input: entry.arguments.clone(),
                                            thought_signature: entry.thought_signature.clone(),
                                        },
                                    )));
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                // Flush each accumulated tool call as a final
                                // (is_input_complete: true) event, or a parse
                                // error if its arguments never became valid JSON.
                                // Note: HashMap::drain yields calls in arbitrary order.
                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| match parse_tool_arguments(
                                        &tool_call.arguments,
                                    ) {
                                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                            LanguageModelToolUse {
                                                id: tool_call.id.into(),
                                                name: tool_call.name.as_str().into(),
                                                is_input_complete: true,
                                                input,
                                                raw_input: tool_call.arguments,
                                                thought_signature: tool_call.thought_signature,
                                            },
                                        )),
                                        Err(error) => Ok(
                                            LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                id: tool_call.id.into(),
                                                tool_name: tool_call.name.as_str().into(),
                                                raw_input: tool_call.arguments.into(),
                                                json_parse_error: error.to_string(),
                                            },
                                        ),
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                // Unknown finish reasons are logged but treated
                                // as a normal end of turn rather than an error.
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            // Upstream exhausted: end the mapped stream.
            None
        },
    )
    // Each upstream event may expand to several completion events.
    .flat_map(futures::stream::iter)
}
 691
/// Maps Copilot Responses-API stream events to `LanguageModelCompletionEvent`s.
pub struct CopilotResponsesEventMapper {
    // Set when a Stop event has already been emitted (e.g. after a tool call),
    // so a later completion event doesn't emit a duplicate Stop.
    pending_stop_reason: Option<StopReason>,
}
 695
 696impl CopilotResponsesEventMapper {
    /// Creates a mapper with no stop reason pending.
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }
 702
    /// Consumes the mapper and adapts a raw Responses-API event stream.
    /// Each upstream event may fan out into zero or more completion events;
    /// transport errors are surfaced as single error items.
    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }
 715
 716    fn map_event(
 717        &mut self,
 718        event: copilot_responses::StreamEvent,
 719    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
 720        match event {
 721            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
 722                copilot_responses::ResponseOutputItem::Message { id, .. } => {
 723                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
 724                        message_id: id,
 725                    })]
 726                }
 727                _ => Vec::new(),
 728            },
 729
 730            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
 731                if delta.is_empty() {
 732                    Vec::new()
 733                } else {
 734                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
 735                }
 736            }
 737
 738            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
 739                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
 740                copilot_responses::ResponseOutputItem::FunctionCall {
 741                    call_id,
 742                    name,
 743                    arguments,
 744                    thought_signature,
 745                    ..
 746                } => {
 747                    let mut events = Vec::new();
 748                    match parse_tool_arguments(&arguments) {
 749                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
 750                            LanguageModelToolUse {
 751                                id: call_id.into(),
 752                                name: name.as_str().into(),
 753                                is_input_complete: true,
 754                                input,
 755                                raw_input: arguments.clone(),
 756                                thought_signature,
 757                            },
 758                        ))),
 759                        Err(error) => {
 760                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
 761                                id: call_id.into(),
 762                                tool_name: name.as_str().into(),
 763                                raw_input: arguments.clone().into(),
 764                                json_parse_error: error.to_string(),
 765                            }))
 766                        }
 767                    }
 768                    // Record that we already emitted a tool-use stop so we can avoid duplicating
 769                    // a Stop event on Completed.
 770                    self.pending_stop_reason = Some(StopReason::ToolUse);
 771                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
 772                    events
 773                }
 774                copilot_responses::ResponseOutputItem::Reasoning {
 775                    summary,
 776                    encrypted_content,
 777                    ..
 778                } => {
 779                    let mut events = Vec::new();
 780
 781                    if let Some(blocks) = summary {
 782                        let mut text = String::new();
 783                        for block in blocks {
 784                            text.push_str(&block.text);
 785                        }
 786                        if !text.is_empty() {
 787                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
 788                                text,
 789                                signature: None,
 790                            }));
 791                        }
 792                    }
 793
 794                    if let Some(data) = encrypted_content {
 795                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
 796                    }
 797
 798                    events
 799                }
 800            },
 801
 802            copilot_responses::StreamEvent::Completed { response } => {
 803                let mut events = Vec::new();
 804                if let Some(usage) = response.usage {
 805                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
 806                        input_tokens: usage.input_tokens.unwrap_or(0),
 807                        output_tokens: usage.output_tokens.unwrap_or(0),
 808                        cache_creation_input_tokens: 0,
 809                        cache_read_input_tokens: 0,
 810                    })));
 811                }
 812                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
 813                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
 814                }
 815                events
 816            }
 817
 818            copilot_responses::StreamEvent::Incomplete { response } => {
 819                let reason = response
 820                    .incomplete_details
 821                    .as_ref()
 822                    .and_then(|details| details.reason.as_ref());
 823                let stop_reason = match reason {
 824                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
 825                        StopReason::MaxTokens
 826                    }
 827                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
 828                    _ => self
 829                        .pending_stop_reason
 830                        .take()
 831                        .unwrap_or(StopReason::EndTurn),
 832                };
 833
 834                let mut events = Vec::new();
 835                if let Some(usage) = response.usage {
 836                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
 837                        input_tokens: usage.input_tokens.unwrap_or(0),
 838                        output_tokens: usage.output_tokens.unwrap_or(0),
 839                        cache_creation_input_tokens: 0,
 840                        cache_read_input_tokens: 0,
 841                    })));
 842                }
 843                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
 844                events
 845            }
 846
 847            copilot_responses::StreamEvent::Failed { response } => {
 848                let provider = PROVIDER_NAME;
 849                let (status_code, message) = match response.error {
 850                    Some(error) => {
 851                        let status_code = StatusCode::from_str(&error.code)
 852                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
 853                        (status_code, error.message)
 854                    }
 855                    None => (
 856                        StatusCode::INTERNAL_SERVER_ERROR,
 857                        "response.failed".to_string(),
 858                    ),
 859                };
 860                vec![Err(LanguageModelCompletionError::HttpResponseError {
 861                    provider,
 862                    status_code,
 863                    message,
 864                })]
 865            }
 866
 867            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
 868                LanguageModelCompletionError::Other(anyhow!(error.message)),
 869            )],
 870
 871            copilot_responses::StreamEvent::Created { .. }
 872            | copilot_responses::StreamEvent::Unknown => Vec::new(),
 873        }
 874    }
 875}
 876
 877fn into_copilot_chat(
 878    model: &CopilotChatModel,
 879    request: LanguageModelRequest,
 880) -> Result<CopilotChatRequest> {
 881    let temperature = request.temperature;
 882    let tool_choice = request.tool_choice;
 883
 884    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
 885    for message in request.messages {
 886        if let Some(last_message) = request_messages.last_mut() {
 887            if last_message.role == message.role {
 888                last_message.content.extend(message.content);
 889            } else {
 890                request_messages.push(message);
 891            }
 892        } else {
 893            request_messages.push(message);
 894        }
 895    }
 896
 897    let mut messages: Vec<ChatMessage> = Vec::new();
 898    for message in request_messages {
 899        match message.role {
 900            Role::User => {
 901                for content in &message.content {
 902                    if let MessageContent::ToolResult(tool_result) = content {
 903                        let content = match &tool_result.content {
 904                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
 905                            LanguageModelToolResultContent::Image(image) => {
 906                                if model.supports_vision() {
 907                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
 908                                        image_url: ImageUrl {
 909                                            url: image.to_base64_url(),
 910                                        },
 911                                    }])
 912                                } else {
 913                                    debug_panic!(
 914                                        "This should be caught at {} level",
 915                                        tool_result.tool_name
 916                                    );
 917                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
 918                                }
 919                            }
 920                        };
 921
 922                        messages.push(ChatMessage::Tool {
 923                            tool_call_id: tool_result.tool_use_id.to_string(),
 924                            content,
 925                        });
 926                    }
 927                }
 928
 929                let mut content_parts = Vec::new();
 930                for content in &message.content {
 931                    match content {
 932                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
 933                            if !text.is_empty() =>
 934                        {
 935                            if let Some(ChatMessagePart::Text { text: text_content }) =
 936                                content_parts.last_mut()
 937                            {
 938                                text_content.push_str(text);
 939                            } else {
 940                                content_parts.push(ChatMessagePart::Text {
 941                                    text: text.to_string(),
 942                                });
 943                            }
 944                        }
 945                        MessageContent::Image(image) if model.supports_vision() => {
 946                            content_parts.push(ChatMessagePart::Image {
 947                                image_url: ImageUrl {
 948                                    url: image.to_base64_url(),
 949                                },
 950                            });
 951                        }
 952                        _ => {}
 953                    }
 954                }
 955
 956                if !content_parts.is_empty() {
 957                    messages.push(ChatMessage::User {
 958                        content: content_parts.into(),
 959                    });
 960                }
 961            }
 962            Role::Assistant => {
 963                let mut tool_calls = Vec::new();
 964                for content in &message.content {
 965                    if let MessageContent::ToolUse(tool_use) = content {
 966                        tool_calls.push(ToolCall {
 967                            id: tool_use.id.to_string(),
 968                            content: ToolCallContent::Function {
 969                                function: FunctionContent {
 970                                    name: tool_use.name.to_string(),
 971                                    arguments: serde_json::to_string(&tool_use.input)?,
 972                                    thought_signature: tool_use.thought_signature.clone(),
 973                                },
 974                            },
 975                        });
 976                    }
 977                }
 978
 979                let text_content = {
 980                    let mut buffer = String::new();
 981                    for string in message.content.iter().filter_map(|content| match content {
 982                        MessageContent::Text(text) => Some(text.as_str()),
 983                        MessageContent::Thinking { .. }
 984                        | MessageContent::ToolUse(_)
 985                        | MessageContent::RedactedThinking(_)
 986                        | MessageContent::ToolResult(_)
 987                        | MessageContent::Image(_) => None,
 988                    }) {
 989                        buffer.push_str(string);
 990                    }
 991
 992                    buffer
 993                };
 994
 995                // Extract reasoning_opaque and reasoning_text from reasoning_details
 996                let (reasoning_opaque, reasoning_text) =
 997                    if let Some(details) = &message.reasoning_details {
 998                        let opaque = details
 999                            .get("reasoning_opaque")
1000                            .and_then(|v| v.as_str())
1001                            .map(|s| s.to_string());
1002                        let text = details
1003                            .get("reasoning_text")
1004                            .and_then(|v| v.as_str())
1005                            .map(|s| s.to_string());
1006                        (opaque, text)
1007                    } else {
1008                        (None, None)
1009                    };
1010
1011                messages.push(ChatMessage::Assistant {
1012                    content: if text_content.is_empty() {
1013                        ChatMessageContent::empty()
1014                    } else {
1015                        text_content.into()
1016                    },
1017                    tool_calls,
1018                    reasoning_opaque,
1019                    reasoning_text,
1020                });
1021            }
1022            Role::System => messages.push(ChatMessage::System {
1023                content: message.string_contents(),
1024            }),
1025        }
1026    }
1027
1028    let tools = request
1029        .tools
1030        .iter()
1031        .map(|tool| Tool::Function {
1032            function: Function {
1033                name: tool.name.clone(),
1034                description: tool.description.clone(),
1035                parameters: tool.input_schema.clone(),
1036            },
1037        })
1038        .collect::<Vec<_>>();
1039
1040    Ok(CopilotChatRequest {
1041        n: 1,
1042        stream: model.uses_streaming(),
1043        temperature: temperature.unwrap_or(0.1),
1044        model: model.id().to_string(),
1045        messages,
1046        tools,
1047        tool_choice: tool_choice.map(|choice| match choice {
1048            LanguageModelToolChoice::Auto => ToolChoice::Auto,
1049            LanguageModelToolChoice::Any => ToolChoice::Required,
1050            LanguageModelToolChoice::None => ToolChoice::None,
1051        }),
1052        thinking_budget: None,
1053    })
1054}
1055
/// Clamps the default thinking budget (16 000 tokens) into the window the
/// model allows.
///
/// The floor is `min_budget` (default 1024); the ceiling is the smaller of
/// `max_budget` and `max_output_tokens - 1` (saturating at zero). The value
/// is raised to the floor before being clamped to the ceiling, so the
/// ceiling wins whenever the two conflict.
fn compute_thinking_budget(
    min_budget: Option<u32>,
    max_budget: Option<u32>,
    max_output_tokens: u32,
) -> Option<u32> {
    const DEFAULT_BUDGET: u32 = 16000;
    let floor = min_budget.unwrap_or(1024);
    let hard_cap = max_output_tokens.saturating_sub(1);
    let ceiling = max_budget.unwrap_or(hard_cap).min(hard_cap);
    Some(DEFAULT_BUDGET.max(floor).min(ceiling))
}
1071
1072fn intent_to_chat_location(intent: Option<CompletionIntent>) -> ChatLocation {
1073    match intent {
1074        Some(CompletionIntent::UserPrompt) => ChatLocation::Agent,
1075        Some(CompletionIntent::Subagent) => ChatLocation::Agent,
1076        Some(CompletionIntent::ToolResults) => ChatLocation::Agent,
1077        Some(CompletionIntent::ThreadSummarization) => ChatLocation::Panel,
1078        Some(CompletionIntent::ThreadContextSummarization) => ChatLocation::Panel,
1079        Some(CompletionIntent::CreateFile) => ChatLocation::Agent,
1080        Some(CompletionIntent::EditFile) => ChatLocation::Agent,
1081        Some(CompletionIntent::InlineAssist) => ChatLocation::Editor,
1082        Some(CompletionIntent::TerminalInlineAssist) => ChatLocation::Terminal,
1083        Some(CompletionIntent::GenerateGitCommitMessage) => ChatLocation::Other,
1084        None => ChatLocation::Panel,
1085    }
1086}
1087
/// Lowers a [`LanguageModelRequest`] into a Copilot Responses-API request.
///
/// Ordering is significant: within a user turn, function-call outputs are
/// emitted before the user's own content; within an assistant turn, function
/// calls come first, then redacted-thinking items, then the text message.
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    // Exhaustive destructuring makes the ignored fields explicit and forces a
    // compile error here if the request type grows a new field.
    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed,
        thinking_effort: _,
        speed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                // Emit tool results first so each function-call output
                // directly follows the call it answers.
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => {
                                responses::ResponseFunctionOutput::Text(text.to_string())
                            }
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    responses::ResponseFunctionOutput::Content(vec![
                                        responses::ResponseInputContent::InputImage {
                                            image_url: Some(image.to_base64_url()),
                                            detail: Default::default(),
                                        },
                                    ])
                                } else {
                                    // Vision-incapable models should have been
                                    // filtered upstream; degrade to a text note.
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                // Collect the user's own text/image parts; images are dropped
                // entirely when the model lacks vision support.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                // Replay the assistant's prior function calls, preserving the
                // original raw argument string and thought signature.
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                // Round-trip opaque reasoning state captured from earlier
                // responses.
                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                // Assistant text becomes output text; images in assistant
                // history are replaced with a textual placeholder.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                // System messages forward only their text content.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Required,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        // Reasoning is requested only when the caller allows thinking; effort
        // is fixed at Medium with detailed summaries here.
        reasoning: if thinking_allowed {
            Some(copilot_responses::ReasoningConfig {
                effort: copilot_responses::ReasoningEffort::Medium,
                summary: Some(copilot_responses::ReasoningSummary::Detailed),
            })
        } else {
            None
        },
        // Ask the API to include encrypted reasoning so it can be replayed in
        // later turns; responses are not stored server-side.
        include: Some(vec![
            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
        store: false,
    }
}
1284
1285#[cfg(test)]
1286mod tests {
1287    use super::*;
1288    use copilot_chat::responses;
1289    use futures::StreamExt;
1290
1291    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
1292        futures::executor::block_on(async {
1293            CopilotResponsesEventMapper::new()
1294                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1295                .collect::<Vec<_>>()
1296                .await
1297                .into_iter()
1298                .map(Result::unwrap)
1299                .collect()
1300        })
1301    }
1302
1303    #[test]
1304    fn responses_stream_maps_text_and_usage() {
1305        let events = vec![
1306            responses::StreamEvent::OutputItemAdded {
1307                output_index: 0,
1308                sequence_number: None,
1309                item: responses::ResponseOutputItem::Message {
1310                    id: "msg_1".into(),
1311                    role: "assistant".into(),
1312                    content: Some(Vec::new()),
1313                },
1314            },
1315            responses::StreamEvent::OutputTextDelta {
1316                item_id: "msg_1".into(),
1317                output_index: 0,
1318                delta: "Hello".into(),
1319            },
1320            responses::StreamEvent::Completed {
1321                response: responses::Response {
1322                    usage: Some(responses::ResponseUsage {
1323                        input_tokens: Some(5),
1324                        output_tokens: Some(3),
1325                        total_tokens: Some(8),
1326                    }),
1327                    ..Default::default()
1328                },
1329            },
1330        ];
1331
1332        let mapped = map_events(events);
1333        assert!(matches!(
1334            mapped[0],
1335            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
1336        ));
1337        assert!(matches!(
1338            mapped[1],
1339            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
1340        ));
1341        assert!(matches!(
1342            mapped[2],
1343            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1344                input_tokens: 5,
1345                output_tokens: 3,
1346                ..
1347            })
1348        ));
1349        assert!(matches!(
1350            mapped[3],
1351            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
1352        ));
1353    }
1354
1355    #[test]
1356    fn responses_stream_maps_tool_calls() {
1357        let events = vec![responses::StreamEvent::OutputItemDone {
1358            output_index: 0,
1359            sequence_number: None,
1360            item: responses::ResponseOutputItem::FunctionCall {
1361                id: Some("fn_1".into()),
1362                call_id: "call_1".into(),
1363                name: "do_it".into(),
1364                arguments: "{\"x\":1}".into(),
1365                status: None,
1366                thought_signature: None,
1367            },
1368        }];
1369
1370        let mapped = map_events(events);
1371        assert!(matches!(
1372            mapped[0],
1373            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
1374        ));
1375        assert!(matches!(
1376            mapped[1],
1377            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1378        ));
1379    }
1380
1381    #[test]
1382    fn responses_stream_handles_json_parse_error() {
1383        let events = vec![responses::StreamEvent::OutputItemDone {
1384            output_index: 0,
1385            sequence_number: None,
1386            item: responses::ResponseOutputItem::FunctionCall {
1387                id: Some("fn_1".into()),
1388                call_id: "call_1".into(),
1389                name: "do_it".into(),
1390                arguments: "{not json}".into(),
1391                status: None,
1392                thought_signature: None,
1393            },
1394        }];
1395
1396        let mapped = map_events(events);
1397        assert!(matches!(
1398            mapped[0],
1399            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
1400                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
1401        ));
1402        assert!(matches!(
1403            mapped[1],
1404            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1405        ));
1406    }
1407
1408    #[test]
1409    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
1410        let events = vec![responses::StreamEvent::OutputItemDone {
1411            output_index: 0,
1412            sequence_number: None,
1413            item: responses::ResponseOutputItem::Reasoning {
1414                id: "r1".into(),
1415                summary: Some(vec![responses::ResponseReasoningItem {
1416                    kind: "summary_text".into(),
1417                    text: "Chain".into(),
1418                }]),
1419                encrypted_content: Some("ENC".into()),
1420            },
1421        }];
1422
1423        let mapped = map_events(events);
1424        assert!(matches!(
1425            mapped[0],
1426            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
1427        ));
1428        assert!(matches!(
1429            mapped[1],
1430            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
1431        ));
1432    }
1433
1434    #[test]
1435    fn responses_stream_handles_incomplete_max_tokens() {
1436        let events = vec![responses::StreamEvent::Incomplete {
1437            response: responses::Response {
1438                usage: Some(responses::ResponseUsage {
1439                    input_tokens: Some(10),
1440                    output_tokens: Some(0),
1441                    total_tokens: Some(10),
1442                }),
1443                incomplete_details: Some(responses::IncompleteDetails {
1444                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
1445                }),
1446                ..Default::default()
1447            },
1448        }];
1449
1450        let mapped = map_events(events);
1451        assert!(matches!(
1452            mapped[0],
1453            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1454                input_tokens: 10,
1455                output_tokens: 0,
1456                ..
1457            })
1458        ));
1459        assert!(matches!(
1460            mapped[1],
1461            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
1462        ));
1463    }
1464
1465    #[test]
1466    fn responses_stream_handles_incomplete_content_filter() {
1467        let events = vec![responses::StreamEvent::Incomplete {
1468            response: responses::Response {
1469                usage: None,
1470                incomplete_details: Some(responses::IncompleteDetails {
1471                    reason: Some(responses::IncompleteReason::ContentFilter),
1472                }),
1473                ..Default::default()
1474            },
1475        }];
1476
1477        let mapped = map_events(events);
1478        assert!(matches!(
1479            mapped.last().unwrap(),
1480            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
1481        ));
1482    }
1483
1484    #[test]
1485    fn responses_stream_completed_no_duplicate_after_tool_use() {
1486        let events = vec![
1487            responses::StreamEvent::OutputItemDone {
1488                output_index: 0,
1489                sequence_number: None,
1490                item: responses::ResponseOutputItem::FunctionCall {
1491                    id: Some("fn_1".into()),
1492                    call_id: "call_1".into(),
1493                    name: "do_it".into(),
1494                    arguments: "{}".into(),
1495                    status: None,
1496                    thought_signature: None,
1497                },
1498            },
1499            responses::StreamEvent::Completed {
1500                response: responses::Response::default(),
1501            },
1502        ];
1503
1504        let mapped = map_events(events);
1505
1506        let mut stop_count = 0usize;
1507        let mut saw_tool_use_stop = false;
1508        for event in mapped {
1509            if let LanguageModelCompletionEvent::Stop(reason) = event {
1510                stop_count += 1;
1511                if matches!(reason, StopReason::ToolUse) {
1512                    saw_tool_use_stop = true;
1513                }
1514            }
1515        }
1516        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
1517        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
1518    }
1519
1520    #[test]
1521    fn responses_stream_failed_maps_http_response_error() {
1522        let events = vec![responses::StreamEvent::Failed {
1523            response: responses::Response {
1524                error: Some(responses::ResponseError {
1525                    code: "429".into(),
1526                    message: "too many requests".into(),
1527                }),
1528                ..Default::default()
1529            },
1530        }];
1531
1532        let mapped_results = futures::executor::block_on(async {
1533            CopilotResponsesEventMapper::new()
1534                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1535                .collect::<Vec<_>>()
1536                .await
1537        });
1538
1539        assert_eq!(mapped_results.len(), 1);
1540        match &mapped_results[0] {
1541            Err(LanguageModelCompletionError::HttpResponseError {
1542                status_code,
1543                message,
1544                ..
1545            }) => {
1546                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
1547                assert_eq!(message, "too many requests");
1548            }
1549            other => panic!("expected HttpResponseError, got {:?}", other),
1550        }
1551    }
1552
1553    #[test]
1554    fn chat_completions_stream_maps_reasoning_data() {
1555        use copilot_chat::{
1556            FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
1557        };
1558
1559        let events = vec![
1560            ResponseEvent {
1561                choices: vec![ResponseChoice {
1562                    index: Some(0),
1563                    finish_reason: None,
1564                    delta: Some(ResponseDelta {
1565                        content: None,
1566                        role: Some(Role::Assistant),
1567                        tool_calls: vec![ToolCallChunk {
1568                            index: Some(0),
1569                            id: Some("call_abc123".to_string()),
1570                            function: Some(FunctionChunk {
1571                                name: Some("list_directory".to_string()),
1572                                arguments: Some("{\"path\":\"test\"}".to_string()),
1573                                thought_signature: None,
1574                            }),
1575                        }],
1576                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
1577                        reasoning_text: Some("Let me check the directory".to_string()),
1578                    }),
1579                    message: None,
1580                }],
1581                id: "chatcmpl-123".to_string(),
1582                usage: None,
1583            },
1584            ResponseEvent {
1585                choices: vec![ResponseChoice {
1586                    index: Some(0),
1587                    finish_reason: Some("tool_calls".to_string()),
1588                    delta: Some(ResponseDelta {
1589                        content: None,
1590                        role: None,
1591                        tool_calls: vec![],
1592                        reasoning_opaque: None,
1593                        reasoning_text: None,
1594                    }),
1595                    message: None,
1596                }],
1597                id: "chatcmpl-123".to_string(),
1598                usage: None,
1599            },
1600        ];
1601
1602        let mapped = futures::executor::block_on(async {
1603            map_to_language_model_completion_events(
1604                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
1605                true,
1606            )
1607            .collect::<Vec<_>>()
1608            .await
1609        });
1610
1611        let mut has_reasoning_details = false;
1612        let mut has_tool_use = false;
1613        let mut reasoning_opaque_value: Option<String> = None;
1614        let mut reasoning_text_value: Option<String> = None;
1615
1616        for event_result in mapped {
1617            match event_result {
1618                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
1619                    has_reasoning_details = true;
1620                    reasoning_opaque_value = details
1621                        .get("reasoning_opaque")
1622                        .and_then(|v| v.as_str())
1623                        .map(|s| s.to_string());
1624                    reasoning_text_value = details
1625                        .get("reasoning_text")
1626                        .and_then(|v| v.as_str())
1627                        .map(|s| s.to_string());
1628                }
1629                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
1630                    has_tool_use = true;
1631                    assert_eq!(tool_use.id.to_string(), "call_abc123");
1632                    assert_eq!(tool_use.name.as_ref(), "list_directory");
1633                }
1634                _ => {}
1635            }
1636        }
1637
1638        assert!(
1639            has_reasoning_details,
1640            "Should emit ReasoningDetails event for Gemini 3 reasoning"
1641        );
1642        assert!(has_tool_use, "Should emit ToolUse event");
1643        assert_eq!(
1644            reasoning_opaque_value,
1645            Some("encrypted_reasoning_token_xyz".to_string()),
1646            "Should capture reasoning_opaque"
1647        );
1648        assert_eq!(
1649            reasoning_text_value,
1650            Some("Let me check the directory".to_string()),
1651            "Should capture reasoning_text"
1652        );
1653    }
1654}