//! GitHub Copilot Chat language model provider (copilot_chat.rs).

   1use std::pin::Pin;
   2use std::str::FromStr as _;
   3use std::sync::Arc;
   4
   5use anthropic::AnthropicModelMode;
   6use anyhow::{Result, anyhow};
   7use cloud_llm_client::CompletionIntent;
   8use collections::HashMap;
   9use copilot::{GlobalCopilotAuth, Status};
  10use copilot_chat::responses as copilot_responses;
  11use copilot_chat::{
  12    ChatLocation, ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat,
  13    CopilotChatConfiguration, Function, FunctionContent, ImageUrl, Model as CopilotChatModel,
  14    ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent,
  15    ToolChoice,
  16};
  17use futures::future::BoxFuture;
  18use futures::stream::BoxStream;
  19use futures::{FutureExt, Stream, StreamExt};
  20use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
  21use http_client::StatusCode;
  22use language::language_settings::all_language_settings;
  23use language_model::{
  24    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCompletionError,
  25    LanguageModelCompletionEvent, LanguageModelCostInfo, LanguageModelEffortLevel, LanguageModelId,
  26    LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
  27    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
  28    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
  29    LanguageModelToolUse, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
  30};
  31use settings::SettingsStore;
  32use ui::prelude::*;
  33use util::debug_panic;
  34
  35use crate::provider::anthropic::{AnthropicEventMapper, into_anthropic};
  36use crate::provider::util::parse_tool_arguments;
  37
/// Stable identifier for this provider, used to key settings and model lookups.
const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
/// Human-readable provider name shown in the UI.
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");
  41
/// Language model provider backed by GitHub Copilot Chat.
pub struct CopilotChatLanguageModelProvider {
    // Observable state entity; the UI watches it to refresh when auth or settings change.
    state: Entity<State>,
}
  45
/// Observable provider state. The subscriptions are stored only to keep them
/// alive; dropping them would stop the change notifications they drive.
pub struct State {
    // Notifies observers when the global `CopilotChat` entity changes
    // (`None` when no global Copilot Chat instance exists at construction time).
    _copilot_chat_subscription: Option<Subscription>,
    // Re-applies the Copilot enterprise URI whenever settings change.
    _settings_subscription: Subscription,
}
  50
  51impl State {
  52    fn is_authenticated(&self, cx: &App) -> bool {
  53        CopilotChat::global(cx)
  54            .map(|m| m.read(cx).is_authenticated())
  55            .unwrap_or(false)
  56    }
  57}
  58
  59impl CopilotChatLanguageModelProvider {
  60    pub fn new(cx: &mut App) -> Self {
  61        let state = cx.new(|cx| {
  62            let copilot_chat_subscription = CopilotChat::global(cx)
  63                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
  64            State {
  65                _copilot_chat_subscription: copilot_chat_subscription,
  66                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
  67                    if let Some(copilot_chat) = CopilotChat::global(cx) {
  68                        let language_settings = all_language_settings(None, cx);
  69                        let configuration = CopilotChatConfiguration {
  70                            enterprise_uri: language_settings
  71                                .edit_predictions
  72                                .copilot
  73                                .enterprise_uri
  74                                .clone(),
  75                        };
  76                        copilot_chat.update(cx, |chat, cx| {
  77                            chat.set_configuration(configuration, cx);
  78                        });
  79                    }
  80                    cx.notify();
  81                }),
  82            }
  83        });
  84
  85        Self { state }
  86    }
  87
  88    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
  89        Arc::new(CopilotChatLanguageModel {
  90            model,
  91            request_limiter: RateLimiter::new(4),
  92        })
  93    }
  94}
  95
  96impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
  97    type ObservableEntity = State;
  98
  99    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
 100        Some(self.state.clone())
 101    }
 102}
 103
 104impl LanguageModelProvider for CopilotChatLanguageModelProvider {
 105    fn id(&self) -> LanguageModelProviderId {
 106        PROVIDER_ID
 107    }
 108
 109    fn name(&self) -> LanguageModelProviderName {
 110        PROVIDER_NAME
 111    }
 112
 113    fn icon(&self) -> IconOrSvg {
 114        IconOrSvg::Icon(IconName::Copilot)
 115    }
 116
 117    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
 118        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
 119        models
 120            .first()
 121            .map(|model| self.create_language_model(model.clone()))
 122    }
 123
 124    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
 125        // The default model should be Copilot Chat's 'base model', which is likely a relatively fast
 126        // model (e.g. 4o) and a sensible choice when considering premium requests
 127        self.default_model(cx)
 128    }
 129
 130    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
 131        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
 132            return Vec::new();
 133        };
 134        models
 135            .iter()
 136            .map(|model| self.create_language_model(model.clone()))
 137            .collect()
 138    }
 139
 140    fn is_authenticated(&self, cx: &App) -> bool {
 141        self.state.read(cx).is_authenticated(cx)
 142    }
 143
 144    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
 145        if self.is_authenticated(cx) {
 146            return Task::ready(Ok(()));
 147        };
 148
 149        let Some(copilot) = GlobalCopilotAuth::try_global(cx).cloned() else {
 150            return Task::ready(Err(anyhow!(concat!(
 151                "Copilot must be enabled for Copilot Chat to work. ",
 152                "Please enable Copilot and try again."
 153            ))
 154            .into()));
 155        };
 156
 157        let err = match copilot.0.read(cx).status() {
 158            Status::Authorized => return Task::ready(Ok(())),
 159            Status::Disabled => anyhow!(
 160                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
 161            ),
 162            Status::Error(err) => anyhow!(format!(
 163                "Received the following error while signing into Copilot: {err}"
 164            )),
 165            Status::Starting { task: _ } => anyhow!(
 166                "Copilot is still starting, please wait for Copilot to start then try again"
 167            ),
 168            Status::Unauthorized => anyhow!(
 169                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
 170            ),
 171            Status::SignedOut { .. } => {
 172                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
 173            }
 174            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
 175        };
 176
 177        Task::ready(Err(err.into()))
 178    }
 179
 180    fn configuration_view(
 181        &self,
 182        _target_agent: language_model::ConfigurationViewTargetAgent,
 183        _: &mut Window,
 184        cx: &mut App,
 185    ) -> AnyView {
 186        cx.new(|cx| {
 187            copilot_ui::ConfigurationView::new(
 188                |cx| {
 189                    CopilotChat::global(cx)
 190                        .map(|m| m.read(cx).is_authenticated())
 191                        .unwrap_or(false)
 192                },
 193                copilot_ui::ConfigurationMode::Chat,
 194                cx,
 195            )
 196        })
 197        .into()
 198    }
 199
 200    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
 201        Task::ready(Err(anyhow!(
 202            "Signing out of GitHub Copilot Chat is currently not supported."
 203        )))
 204    }
 205}
 206
 207fn collect_tiktoken_messages(
 208    request: LanguageModelRequest,
 209) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
 210    request
 211        .messages
 212        .into_iter()
 213        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
 214            role: match message.role {
 215                Role::User => "user".into(),
 216                Role::Assistant => "assistant".into(),
 217                Role::System => "system".into(),
 218            },
 219            content: Some(message.string_contents()),
 220            name: None,
 221            function_call: None,
 222        })
 223        .collect::<Vec<_>>()
 224}
 225
/// A single Copilot Chat model exposed through the `LanguageModel` trait.
pub struct CopilotChatLanguageModel {
    // The underlying Copilot Chat model descriptor (id, capabilities, limits).
    model: CopilotChatModel,
    // Caps concurrent in-flight requests for this model.
    request_limiter: RateLimiter,
}
 230
 231impl LanguageModel for CopilotChatLanguageModel {
 232    fn id(&self) -> LanguageModelId {
 233        LanguageModelId::from(self.model.id().to_string())
 234    }
 235
 236    fn name(&self) -> LanguageModelName {
 237        LanguageModelName::from(self.model.display_name().to_string())
 238    }
 239
 240    fn provider_id(&self) -> LanguageModelProviderId {
 241        PROVIDER_ID
 242    }
 243
 244    fn provider_name(&self) -> LanguageModelProviderName {
 245        PROVIDER_NAME
 246    }
 247
 248    fn supports_tools(&self) -> bool {
 249        self.model.supports_tools()
 250    }
 251
 252    fn supports_streaming_tools(&self) -> bool {
 253        true
 254    }
 255
 256    fn supports_images(&self) -> bool {
 257        self.model.supports_vision()
 258    }
 259
 260    fn supports_thinking(&self) -> bool {
 261        self.model.can_think()
 262    }
 263
 264    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
 265        let levels = self.model.reasoning_effort_levels();
 266        if levels.is_empty() {
 267            return vec![];
 268        }
 269        levels
 270            .iter()
 271            .map(|level| {
 272                let name: SharedString = match level.as_str() {
 273                    "low" => "Low".into(),
 274                    "medium" => "Medium".into(),
 275                    "high" => "High".into(),
 276                    _ => SharedString::from(level.clone()),
 277                };
 278                LanguageModelEffortLevel {
 279                    name,
 280                    value: SharedString::from(level.clone()),
 281                    is_default: level == "high",
 282                }
 283            })
 284            .collect()
 285    }
 286
 287    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
 288        match self.model.vendor() {
 289            ModelVendor::OpenAI | ModelVendor::Anthropic => {
 290                LanguageModelToolSchemaFormat::JsonSchema
 291            }
 292            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
 293                LanguageModelToolSchemaFormat::JsonSchemaSubset
 294            }
 295        }
 296    }
 297
 298    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
 299        match choice {
 300            LanguageModelToolChoice::Auto
 301            | LanguageModelToolChoice::Any
 302            | LanguageModelToolChoice::None => self.supports_tools(),
 303        }
 304    }
 305
 306    fn model_cost_info(&self) -> Option<LanguageModelCostInfo> {
 307        LanguageModelCostInfo::RequestCost {
 308            cost_per_request: self.model.multiplier(),
 309        }
 310        .into()
 311    }
 312
 313    fn telemetry_id(&self) -> String {
 314        format!("copilot_chat/{}", self.model.id())
 315    }
 316
 317    fn max_token_count(&self) -> u64 {
 318        self.model.max_token_count()
 319    }
 320
 321    fn count_tokens(
 322        &self,
 323        request: LanguageModelRequest,
 324        cx: &App,
 325    ) -> BoxFuture<'static, Result<u64>> {
 326        let model = self.model.clone();
 327        cx.background_spawn(async move {
 328            let messages = collect_tiktoken_messages(request);
 329            // Copilot uses OpenAI tiktoken tokenizer for all it's model irrespective of the underlying provider(vendor).
 330            let tokenizer_model = match model.tokenizer() {
 331                Some("o200k_base") => "gpt-4o",
 332                Some("cl100k_base") => "gpt-4",
 333                _ => "gpt-4o",
 334            };
 335
 336            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
 337                .map(|tokens| tokens as u64)
 338        })
 339        .boxed()
 340    }
 341
 342    fn stream_completion(
 343        &self,
 344        request: LanguageModelRequest,
 345        cx: &AsyncApp,
 346    ) -> BoxFuture<
 347        'static,
 348        Result<
 349            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
 350            LanguageModelCompletionError,
 351        >,
 352    > {
 353        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
 354            CompletionIntent::UserPrompt
 355            | CompletionIntent::ThreadContextSummarization
 356            | CompletionIntent::InlineAssist
 357            | CompletionIntent::TerminalInlineAssist
 358            | CompletionIntent::GenerateGitCommitMessage => true,
 359
 360            CompletionIntent::Subagent
 361            | CompletionIntent::ToolResults
 362            | CompletionIntent::ThreadSummarization
 363            | CompletionIntent::CreateFile
 364            | CompletionIntent::EditFile => false,
 365        });
 366
 367        if self.model.supports_messages() {
 368            let location = intent_to_chat_location(request.intent);
 369            let model = self.model.clone();
 370            let request_limiter = self.request_limiter.clone();
 371            let future = cx.spawn(async move |cx| {
 372                let effort = request
 373                    .thinking_effort
 374                    .as_ref()
 375                    .and_then(|e| anthropic::Effort::from_str(e).ok());
 376
 377                let mut anthropic_request = into_anthropic(
 378                    request,
 379                    model.id().to_string(),
 380                    0.0,
 381                    model.max_output_tokens() as u64,
 382                    if model.supports_adaptive_thinking() {
 383                        AnthropicModelMode::Thinking {
 384                            budget_tokens: None,
 385                        }
 386                    } else if model.can_think() {
 387                        AnthropicModelMode::Thinking {
 388                            budget_tokens: compute_thinking_budget(
 389                                model.min_thinking_budget(),
 390                                model.max_thinking_budget(),
 391                                model.max_output_tokens() as u32,
 392                            ),
 393                        }
 394                    } else {
 395                        AnthropicModelMode::Default
 396                    },
 397                );
 398
 399                anthropic_request.temperature = None;
 400
 401                // The Copilot proxy doesn't support eager_input_streaming on tools.
 402                for tool in &mut anthropic_request.tools {
 403                    tool.eager_input_streaming = false;
 404                }
 405
 406                if model.supports_adaptive_thinking() {
 407                    if anthropic_request.thinking.is_some() {
 408                        anthropic_request.thinking = Some(anthropic::Thinking::Adaptive);
 409                        anthropic_request.output_config = Some(anthropic::OutputConfig { effort });
 410                    }
 411                }
 412
 413                let anthropic_beta = if !model.supports_adaptive_thinking() && model.can_think() {
 414                    Some("interleaved-thinking-2025-05-14".to_string())
 415                } else {
 416                    None
 417                };
 418
 419                let body = serde_json::to_string(&anthropic::StreamingRequest {
 420                    base: anthropic_request,
 421                    stream: true,
 422                })
 423                .map_err(|e| anyhow::anyhow!(e))?;
 424
 425                let stream = CopilotChat::stream_messages(
 426                    body,
 427                    location,
 428                    is_user_initiated,
 429                    anthropic_beta,
 430                    cx.clone(),
 431                );
 432
 433                request_limiter
 434                    .stream(async move {
 435                        let events = stream.await?;
 436                        let mapper = AnthropicEventMapper::new();
 437                        Ok(mapper.map_stream(events).boxed())
 438                    })
 439                    .await
 440            });
 441            return async move { Ok(future.await?.boxed()) }.boxed();
 442        }
 443
 444        if self.model.supports_response() {
 445            let location = intent_to_chat_location(request.intent);
 446            let responses_request = into_copilot_responses(&self.model, request);
 447            let request_limiter = self.request_limiter.clone();
 448            let future = cx.spawn(async move |cx| {
 449                let request = CopilotChat::stream_response(
 450                    responses_request,
 451                    location,
 452                    is_user_initiated,
 453                    cx.clone(),
 454                );
 455                request_limiter
 456                    .stream(async move {
 457                        let stream = request.await?;
 458                        let mapper = CopilotResponsesEventMapper::new();
 459                        Ok(mapper.map_stream(stream).boxed())
 460                    })
 461                    .await
 462            });
 463            return async move { Ok(future.await?.boxed()) }.boxed();
 464        }
 465
 466        let location = intent_to_chat_location(request.intent);
 467        let copilot_request = match into_copilot_chat(&self.model, request) {
 468            Ok(request) => request,
 469            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
 470        };
 471        let is_streaming = copilot_request.stream;
 472
 473        let request_limiter = self.request_limiter.clone();
 474        let future = cx.spawn(async move |cx| {
 475            let request = CopilotChat::stream_completion(
 476                copilot_request,
 477                location,
 478                is_user_initiated,
 479                cx.clone(),
 480            );
 481            request_limiter
 482                .stream(async move {
 483                    let response = request.await?;
 484                    Ok(map_to_language_model_completion_events(
 485                        response,
 486                        is_streaming,
 487                    ))
 488                })
 489                .await
 490        });
 491        async move { Ok(future.await?.boxed()) }.boxed()
 492    }
 493}
 494
/// Adapts a raw Copilot Chat Completions event stream into
/// `LanguageModelCompletionEvent`s.
///
/// When `is_streaming` is false the payload arrives in `choice.message`
/// instead of `choice.delta`; otherwise the two are handled identically.
/// Partial tool calls are accumulated per index and re-emitted with
/// `is_input_complete: true` once the model finishes with `"tool_calls"`.
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    // Accumulates the fragments of one streamed tool call.
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    // State threaded through `unfold` between polls of the underlying stream.
    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        // Non-streaming responses put the whole message in
                        // `message`; streaming responses use incremental `delta`s.
                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3)
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            // Deltas may omit the explicit index; fall back to position.
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                // Argument JSON arrives in fragments; concatenate them.
                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }

                            // Emit a provisional (is_input_complete: false) tool-use
                            // event whenever the accumulated argument fragment can be
                            // repaired into parseable JSON.
                            if !entry.id.is_empty() && !entry.name.is_empty() {
                                if let Ok(input) = serde_json::from_str::<serde_json::Value>(
                                    &partial_json_fixer::fix_json(&entry.arguments),
                                ) {
                                    events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                        LanguageModelToolUse {
                                            id: entry.id.clone().into(),
                                            name: entry.name.as_str().into(),
                                            is_input_complete: false,
                                            input,
                                            raw_input: entry.arguments.clone(),
                                            thought_signature: entry.thought_signature.clone(),
                                        },
                                    )));
                                }
                            }
                        }

                        // Cache token counts are not reported by this endpoint.
                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                // Flush every accumulated tool call as complete.
                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| match parse_tool_arguments(
                                        &tool_call.arguments,
                                    ) {
                                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                            LanguageModelToolUse {
                                                id: tool_call.id.into(),
                                                name: tool_call.name.as_str().into(),
                                                is_input_complete: true,
                                                input,
                                                raw_input: tool_call.arguments,
                                                thought_signature: tool_call.thought_signature,
                                            },
                                        )),
                                        Err(error) => Ok(
                                            LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                id: tool_call.id.into(),
                                                tool_name: tool_call.name.as_str().into(),
                                                raw_input: tool_call.arguments.into(),
                                                json_parse_error: error.to_string(),
                                            },
                                        ),
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                // Unknown finish reasons are logged and treated as a normal stop.
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    // Each poll yields a batch of events; flatten the batches into one stream.
    .flat_map(futures::stream::iter)
}
 692
/// Maps Copilot Responses-API stream events into `LanguageModelCompletionEvent`s.
pub struct CopilotResponsesEventMapper {
    // Records a Stop already emitted mid-stream (e.g. after a tool call) so
    // later terminal events can avoid emitting a duplicate Stop.
    pending_stop_reason: Option<StopReason>,
}
 696
impl CopilotResponsesEventMapper {
    /// Creates a mapper with no pending stop reason.
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    /// Consumes a raw Responses-API event stream, expanding each upstream
    /// event into zero or more completion events. Stream-level errors are
    /// forwarded as `LanguageModelCompletionError`s.
    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    /// Translates a single stream event into completion events.
    fn map_event(
        &mut self,
        event: copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            // A new assistant message item: announce its id so consumers can
            // attribute subsequent text deltas to it.
            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    // Emit the tool use, or a parse-error event when the model
                    // produced malformed JSON arguments.
                    match parse_tool_arguments(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    // Concatenate all summary blocks into a single Thinking event.
                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    // Preserve opaque reasoning so it can be replayed on later turns.
                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                // Only emit EndTurn when no tool-use Stop was already sent.
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot_responses::StreamEvent::Incomplete { response } => {
                // Map the server-reported incompleteness reason to a stop
                // reason, falling back to any reason recorded mid-stream.
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot_responses::StreamEvent::Failed { response } => {
                // Surface the failure as an HTTP-style error. The error `code`
                // is parsed as a numeric status string (e.g. "429"); anything
                // unparseable falls back to 500.
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(error.message)),
            )],

            copilot_responses::StreamEvent::Created { .. }
            | copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}
 877
/// Converts a [`LanguageModelRequest`] into a Copilot Chat (chat-completions
/// style) request for `model`.
///
/// Consecutive messages sharing a role are merged first. Within a user turn,
/// tool results are emitted as separate `ChatMessage::Tool` entries before the
/// user's own content. Returns an error if a tool call's input cannot be
/// serialized to JSON.
fn into_copilot_chat(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let temperature = request.temperature;
    let tool_choice = request.tool_choice;

    // Coalesce consecutive same-role messages so the API sees at most one
    // message per speaker turn.
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                // Tool results first: each becomes its own `Tool` message keyed
                // by the originating tool call id.
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    // Vision-incompatible models should have been
                                    // filtered upstream; degrade to placeholder text.
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                // Then the user's text/thinking/image content, fusing adjacent
                // text into a single part.
                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                // Collect tool calls issued by the assistant.
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: ToolCallContent::Function {
                                function: FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                // Only plain text survives in the assistant body; thinking,
                // tool, and image content is carried separately or dropped.
                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) => Some(text.as_str()),
                        MessageContent::Thinking { .. }
                        | MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        n: 1,
        stream: model.uses_streaming(),
        // NOTE(review): defaults to 0.1 when the request has no temperature —
        // confirm this matches the intended provider default.
        temperature: temperature.unwrap_or(0.1),
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => ToolChoice::Auto,
            LanguageModelToolChoice::Any => ToolChoice::Any,
            LanguageModelToolChoice::None => ToolChoice::None,
        }),
        thinking_budget: None,
    })
}
1056
/// Clamps the fixed thinking-token budget (16000) into the model's allowed
/// range.
///
/// The result is never below `min_budget` (default 1024) before clamping, and
/// never above `max_budget` or `max_output_tokens - 1`, whichever is smaller.
fn compute_thinking_budget(
    min_budget: Option<u32>,
    max_budget: Option<u32>,
    max_output_tokens: u32,
) -> Option<u32> {
    const CONFIGURED_BUDGET: u32 = 16000;

    // Upper cap: must leave at least one token of output headroom.
    let output_cap = max_output_tokens.saturating_sub(1);
    let ceiling = max_budget.unwrap_or(output_cap).min(output_cap);
    let floor = min_budget.unwrap_or(1024);

    Some(CONFIGURED_BUDGET.max(floor).min(ceiling))
}
1072
1073fn intent_to_chat_location(intent: Option<CompletionIntent>) -> ChatLocation {
1074    match intent {
1075        Some(CompletionIntent::UserPrompt) => ChatLocation::Agent,
1076        Some(CompletionIntent::Subagent) => ChatLocation::Agent,
1077        Some(CompletionIntent::ToolResults) => ChatLocation::Agent,
1078        Some(CompletionIntent::ThreadSummarization) => ChatLocation::Panel,
1079        Some(CompletionIntent::ThreadContextSummarization) => ChatLocation::Panel,
1080        Some(CompletionIntent::CreateFile) => ChatLocation::Agent,
1081        Some(CompletionIntent::EditFile) => ChatLocation::Agent,
1082        Some(CompletionIntent::InlineAssist) => ChatLocation::Editor,
1083        Some(CompletionIntent::TerminalInlineAssist) => ChatLocation::Terminal,
1084        Some(CompletionIntent::GenerateGitCommitMessage) => ChatLocation::Other,
1085        None => ChatLocation::Panel,
1086    }
1087}
1088
/// Converts a [`LanguageModelRequest`] into a Copilot Responses-API request
/// for `model`.
///
/// Tool results, replayed tool calls, and redacted thinking become dedicated
/// input items; plain text and images become message content parts. Encrypted
/// reasoning content is requested back (`include`) so it can be replayed on
/// later turns.
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    // Destructure exhaustively so new request fields must be handled (or
    // explicitly ignored) here.
    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed,
        thinking_effort: _,
        speed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                // Tool results first: each becomes a `FunctionCallOutput`
                // keyed by the originating call id.
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => {
                                responses::ResponseFunctionOutput::Text(text.to_string())
                            }
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    responses::ResponseFunctionOutput::Content(vec![
                                        responses::ResponseInputContent::InputImage {
                                            image_url: Some(image.to_base64_url()),
                                            detail: Default::default(),
                                        },
                                    ])
                                } else {
                                    // Vision-incompatible models should have been
                                    // filtered upstream; degrade to placeholder text.
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                // Then the user's own text and (when supported) image parts.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                // Replay the assistant's tool calls with their original raw
                // arguments and thought signatures.
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                // Replay redacted thinking as encrypted reasoning items.
                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            // Assistant-side images are replaced by a marker.
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                // System messages carry only their text content.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: if thinking_allowed {
            Some(copilot_responses::ReasoningConfig {
                effort: copilot_responses::ReasoningEffort::Medium,
                summary: Some(copilot_responses::ReasoningSummary::Detailed),
            })
        } else {
            None
        },
        // Ask the service to return encrypted reasoning so it can be replayed.
        include: Some(vec![
            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
        store: false,
    }
}
1285
1286#[cfg(test)]
1287mod tests {
1288    use super::*;
1289    use copilot_chat::responses;
1290    use futures::StreamExt;
1291
    /// Drives `events` through a fresh mapper and collects the mapped
    /// completion events, panicking if any maps to an error.
    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }
1303
    /// A message item, a text delta, and a completed response map to
    /// StartMessage, Text, UsageUpdate, and a final EndTurn stop — in order.
    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }
1355
    /// A completed function-call item maps to a ToolUse event followed by a
    /// ToolUse stop.
    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }
1381
    /// Malformed JSON tool arguments map to a ToolUseJsonParseError event,
    /// still followed by a ToolUse stop.
    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }
1408
    /// A reasoning item yields a Thinking event from its summary text and a
    /// RedactedThinking event from its encrypted content.
    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }
1434
    /// An incomplete response due to max output tokens reports usage and a
    /// MaxTokens stop reason.
    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }
1465
    /// An incomplete response due to content filtering ends with a Refusal
    /// stop reason.
    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }
1484
1485    #[test]
1486    fn responses_stream_completed_no_duplicate_after_tool_use() {
1487        let events = vec![
1488            responses::StreamEvent::OutputItemDone {
1489                output_index: 0,
1490                sequence_number: None,
1491                item: responses::ResponseOutputItem::FunctionCall {
1492                    id: Some("fn_1".into()),
1493                    call_id: "call_1".into(),
1494                    name: "do_it".into(),
1495                    arguments: "{}".into(),
1496                    status: None,
1497                    thought_signature: None,
1498                },
1499            },
1500            responses::StreamEvent::Completed {
1501                response: responses::Response::default(),
1502            },
1503        ];
1504
1505        let mapped = map_events(events);
1506
1507        let mut stop_count = 0usize;
1508        let mut saw_tool_use_stop = false;
1509        for event in mapped {
1510            if let LanguageModelCompletionEvent::Stop(reason) = event {
1511                stop_count += 1;
1512                if matches!(reason, StopReason::ToolUse) {
1513                    saw_tool_use_stop = true;
1514                }
1515            }
1516        }
1517        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
1518        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
1519    }
1520
1521    #[test]
1522    fn responses_stream_failed_maps_http_response_error() {
1523        let events = vec![responses::StreamEvent::Failed {
1524            response: responses::Response {
1525                error: Some(responses::ResponseError {
1526                    code: "429".into(),
1527                    message: "too many requests".into(),
1528                }),
1529                ..Default::default()
1530            },
1531        }];
1532
1533        let mapped_results = futures::executor::block_on(async {
1534            CopilotResponsesEventMapper::new()
1535                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1536                .collect::<Vec<_>>()
1537                .await
1538        });
1539
1540        assert_eq!(mapped_results.len(), 1);
1541        match &mapped_results[0] {
1542            Err(LanguageModelCompletionError::HttpResponseError {
1543                status_code,
1544                message,
1545                ..
1546            }) => {
1547                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
1548                assert_eq!(message, "too many requests");
1549            }
1550            other => panic!("expected HttpResponseError, got {:?}", other),
1551        }
1552    }
1553
    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot_chat::{
            FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
        };

        // Two streamed chunks: the first carries a complete tool call plus
        // opaque + plain-text reasoning deltas; the second is an empty delta
        // whose finish_reason ("tool_calls") ends the turn.
        let events = vec![
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(ResponseDelta {
                        content: None,
                        role: Some(Role::Assistant),
                        tool_calls: vec![ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        // Drive the mapper to completion on the in-memory stream. The `true`
        // argument matches the other call sites in this test module; its
        // semantics are defined at the mapper's declaration.
        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        // Scan all emitted events, recording whether the reasoning payload and
        // the tool call were both surfaced, and capturing the reasoning values
        // for exact comparison below.
        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    // The tool call id/name must round-trip unchanged.
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
1655}