// copilot_chat.rs

   1use std::pin::Pin;
   2use std::str::FromStr as _;
   3use std::sync::Arc;
   4
   5use anthropic::AnthropicModelMode;
   6use anyhow::{Result, anyhow};
   7use cloud_llm_client::CompletionIntent;
   8use collections::HashMap;
   9use copilot::{GlobalCopilotAuth, Status};
  10use copilot_chat::responses as copilot_responses;
  11use copilot_chat::{
  12    ChatLocation, ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat,
  13    CopilotChatConfiguration, Function, FunctionContent, ImageUrl, Model as CopilotChatModel,
  14    ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent,
  15    ToolChoice,
  16};
  17use futures::future::BoxFuture;
  18use futures::stream::BoxStream;
  19use futures::{FutureExt, Stream, StreamExt};
  20use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
  21use http_client::StatusCode;
  22use language::language_settings::all_language_settings;
  23use language_model::{
  24    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCompletionError,
  25    LanguageModelCompletionEvent, LanguageModelCostInfo, LanguageModelEffortLevel, LanguageModelId,
  26    LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
  27    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
  28    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
  29    LanguageModelToolUse, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
  30};
  31use settings::SettingsStore;
  32use ui::prelude::*;
  33use util::debug_panic;
  34
  35use crate::provider::anthropic::{AnthropicEventMapper, into_anthropic};
  36use crate::provider::util::parse_tool_arguments;
  37
/// Stable machine identifier for this provider (used in settings/telemetry keys).
const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
/// Human-readable provider name shown in the UI.
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");
  41
/// Language-model provider backed by GitHub Copilot Chat.
pub struct CopilotChatLanguageModelProvider {
    // Observable state driving UI refreshes (auth status, settings changes).
    state: Entity<State>,
}
  45
/// Provider state; exists mainly to hold subscriptions alive so change
/// notifications keep flowing for as long as the provider lives.
pub struct State {
    // Observes the global `CopilotChat` entity; `None` if it wasn't initialized.
    _copilot_chat_subscription: Option<Subscription>,
    // Observes the global settings store to propagate configuration changes.
    _settings_subscription: Subscription,
}
  50
  51impl State {
  52    fn is_authenticated(&self, cx: &App) -> bool {
  53        CopilotChat::global(cx)
  54            .map(|m| m.read(cx).is_authenticated())
  55            .unwrap_or(false)
  56    }
  57}
  58
impl CopilotChatLanguageModelProvider {
    /// Constructs the provider and wires up its observable state:
    /// notifies observers whenever the global `CopilotChat` entity changes,
    /// and pushes the enterprise URI from language settings into
    /// `CopilotChat` whenever the global settings store changes.
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            // May be `None` if the global CopilotChat hasn't been initialized yet.
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        // Forward the configured enterprise URI so enterprise
                        // accounts talk to the correct Copilot endpoint.
                        let configuration = CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    // Notify even when CopilotChat is absent so the UI re-renders.
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    /// Wraps a Copilot Chat model in the `LanguageModel` adapter, capping
    /// concurrent in-flight requests at 4 via the rate limiter.
    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}
  95
impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    /// Exposes the provider's state entity so callers can observe changes.
    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}
 103
impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::Copilot)
    }

    /// Returns the first model reported by the global `CopilotChat` instance,
    /// or `None` if CopilotChat is unavailable or has no model list yet.
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's 'base model', which is likely a relatively fast
        // model (e.g. 4o) and a sensible choice when considering premium requests
        self.default_model(cx)
    }

    /// Wraps every model reported by CopilotChat; empty when unavailable.
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    /// Resolves immediately: either already authenticated, or maps the current
    /// Copilot auth status to a user-facing error. No sign-in flow is started here.
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        };

        // Copilot Chat depends on the base Copilot integration being enabled.
        let Some(copilot) = GlobalCopilotAuth::try_global(cx).cloned() else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.0.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(format!(
                "Received the following error while signing into Copilot: {err}"
            )),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting, please wait for Copilot to start then try again"
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    /// Builds the shared Copilot configuration view in "Chat" mode, with a
    /// callback that re-checks authentication state on each render.
    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| {
            copilot_ui::ConfigurationView::new(
                |cx| {
                    CopilotChat::global(cx)
                        .map(|m| m.read(cx).is_authenticated())
                        .unwrap_or(false)
                },
                copilot_ui::ConfigurationMode::Chat,
                cx,
            )
        })
        .into()
    }

    /// Sign-out is not supported for Copilot Chat; always returns an error.
    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}
 206
 207fn collect_tiktoken_messages(
 208    request: LanguageModelRequest,
 209) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
 210    request
 211        .messages
 212        .into_iter()
 213        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
 214            role: match message.role {
 215                Role::User => "user".into(),
 216                Role::Assistant => "assistant".into(),
 217                Role::System => "system".into(),
 218            },
 219            content: Some(message.string_contents()),
 220            name: None,
 221            function_call: None,
 222        })
 223        .collect::<Vec<_>>()
 224}
 225
/// A single Copilot Chat model exposed through the `LanguageModel` trait.
pub struct CopilotChatLanguageModel {
    // Metadata and capability flags for the underlying Copilot model.
    model: CopilotChatModel,
    // Limits concurrent requests for this model (created with a cap of 4).
    request_limiter: RateLimiter,
}
 230
impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_streaming_tools(&self) -> bool {
        // Partial tool-call JSON is surfaced incrementally by the event mappers.
        true
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn supports_thinking(&self) -> bool {
        self.model.can_think()
    }

    /// Maps the model's raw effort-level strings to UI-facing entries.
    /// Known levels get capitalized names; unknown levels pass through as-is.
    /// "high" is marked as the default level.
    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
        let levels = self.model.reasoning_effort_levels();
        if levels.is_empty() {
            return vec![];
        }
        levels
            .iter()
            .map(|level| {
                let name: SharedString = match level.as_str() {
                    "low" => "Low".into(),
                    "medium" => "Medium".into(),
                    "high" => "High".into(),
                    _ => SharedString::from(level.clone()),
                };
                LanguageModelEffortLevel {
                    name,
                    value: SharedString::from(level.clone()),
                    is_default: level == "high",
                }
            })
            .collect()
    }

    /// OpenAI/Anthropic-backed models accept full JSON Schema for tools;
    /// other vendors only handle a subset.
    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    /// All tool-choice modes are available iff the model supports tools at all.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    /// Copilot bills per request with a model-specific multiplier.
    fn model_cost_info(&self) -> Option<LanguageModelCostInfo> {
        LanguageModelCostInfo::RequestCost {
            cost_per_request: self.model.multiplier(),
        }
        .into()
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    /// Estimates token usage off the main thread using tiktoken.
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all its models,
            // irrespective of the underlying provider (vendor). Map the
            // advertised encoding to a model name tiktoken_rs understands,
            // defaulting to the o200k encoding via "gpt-4o".
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    /// Streams a completion, dispatching on model capability in priority order:
    /// 1. Anthropic-style Messages API (`supports_messages`),
    /// 2. OpenAI-style Responses API (`supports_response`),
    /// 3. legacy Chat Completions API (fallback).
    /// Each path funnels through `request_limiter` and maps the raw event
    /// stream to `LanguageModelCompletionEvent`s.
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        // Missing intent is treated as user-initiated; otherwise classify the
        // intent so the backend can distinguish user actions from agent follow-ups.
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        // Path 1: Anthropic Messages API via the Copilot proxy.
        if self.model.supports_messages() {
            let location = intent_to_chat_location(request.intent);
            let model = self.model.clone();
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|e| anthropic::Effort::from_str(e).ok());

                // Select the thinking mode: adaptive models leave the budget
                // unset; other thinking-capable models get a computed budget.
                let mut anthropic_request = into_anthropic(
                    request,
                    model.id().to_string(),
                    0.0,
                    model.max_output_tokens() as u64,
                    if model.supports_adaptive_thinking() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: None,
                        }
                    } else if model.can_think() {
                        AnthropicModelMode::Thinking {
                            budget_tokens: compute_thinking_budget(
                                model.min_thinking_budget(),
                                model.max_thinking_budget(),
                                model.max_output_tokens() as u32,
                            ),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );

                anthropic_request.temperature = None;

                // The Copilot proxy doesn't support eager_input_streaming on tools.
                for tool in &mut anthropic_request.tools {
                    tool.eager_input_streaming = false;
                }

                // Adaptive thinking replaces the budgeted thinking config and
                // carries the requested effort in output_config instead.
                if model.supports_adaptive_thinking() {
                    if anthropic_request.thinking.is_some() {
                        anthropic_request.thinking = Some(anthropic::Thinking::Adaptive);
                        anthropic_request.output_config = Some(anthropic::OutputConfig { effort });
                    }
                }

                // Non-adaptive thinking models need the interleaved-thinking beta header.
                let anthropic_beta = if !model.supports_adaptive_thinking() && model.can_think() {
                    Some("interleaved-thinking-2025-05-14".to_string())
                } else {
                    None
                };

                let body = serde_json::to_string(&anthropic::StreamingRequest {
                    base: anthropic_request,
                    stream: true,
                })
                .map_err(|e| anyhow::anyhow!(e))?;

                let stream = CopilotChat::stream_messages(
                    body,
                    location,
                    is_user_initiated,
                    anthropic_beta,
                    cx.clone(),
                );

                request_limiter
                    .stream(async move {
                        let events = stream.await?;
                        let mapper = AnthropicEventMapper::new();
                        Ok(mapper.map_stream(events).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        // Path 2: OpenAI-style Responses API.
        if self.model.supports_response() {
            let location = intent_to_chat_location(request.intent);
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request = CopilotChat::stream_response(
                    responses_request,
                    location,
                    is_user_initiated,
                    cx.clone(),
                );
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        // Path 3 (fallback): legacy Chat Completions API.
        let location = intent_to_chat_location(request.intent);
        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request = CopilotChat::stream_completion(
                copilot_request,
                location,
                is_user_initiated,
                cx.clone(),
            );
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}
 493
/// Adapts a raw Chat Completions event stream into `LanguageModelCompletionEvent`s.
///
/// `is_streaming` selects whether per-event content comes from `choice.delta`
/// (streaming responses) or `choice.message` (non-streaming responses).
/// Tool-call fragments are accumulated by index across events and flushed as
/// complete `ToolUse` events when the finish reason is `"tool_calls"`.
/// Reasoning data (reasoning_opaque/reasoning_text) is buffered and emitted
/// as a `ReasoningDetails` event alongside the tool calls.
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    // Accumulates the pieces of one tool call streamed across multiple events.
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    // State threaded through `unfold`: the upstream event stream plus
    // in-progress tool calls and buffered reasoning data.
    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        // Streaming responses carry incremental deltas;
                        // non-streaming responses put the whole message here.
                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3)
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            // Fall back to the positional index when the API
                            // doesn't supply an explicit tool-call index.
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                // Argument JSON arrives in fragments; concatenate them.
                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }

                            // Emit a partial ToolUse event (is_input_complete: false)
                            // whenever the accumulated arguments can be repaired
                            // into valid JSON, enabling streaming tool UI.
                            if !entry.id.is_empty() && !entry.name.is_empty() {
                                if let Ok(input) = serde_json::from_str::<serde_json::Value>(
                                    &partial_json_fixer::fix_json(&entry.arguments),
                                ) {
                                    events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                        LanguageModelToolUse {
                                            id: entry.id.clone().into(),
                                            name: entry.name.as_str().into(),
                                            is_input_complete: false,
                                            input,
                                            raw_input: entry.arguments.clone(),
                                            thought_signature: entry.thought_signature.clone(),
                                        },
                                    )));
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    // Cache accounting isn't reported on this API path.
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                // Flush every accumulated tool call as complete.
                                // JSON parse failures become ToolUseJsonParseError
                                // events rather than failing the whole stream.
                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| match parse_tool_arguments(
                                        &tool_call.arguments,
                                    ) {
                                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                            LanguageModelToolUse {
                                                id: tool_call.id.into(),
                                                name: tool_call.name.as_str().into(),
                                                is_input_complete: true,
                                                input,
                                                raw_input: tool_call.arguments,
                                                thought_signature: tool_call.thought_signature,
                                            },
                                        )),
                                        Err(error) => Ok(
                                            LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                id: tool_call.id.into(),
                                                tool_name: tool_call.name.as_str().into(),
                                                raw_input: tool_call.arguments.into(),
                                                json_parse_error: error.to_string(),
                                            },
                                        ),
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                // Unknown finish reasons are logged and treated as EndTurn.
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            // Upstream exhausted: end the output stream.
            None
        },
    )
    // Each upstream event produced a Vec of events; flatten to a single stream.
    .flat_map(futures::stream::iter)
}
 691
/// Maps Copilot Responses-API stream events to `LanguageModelCompletionEvent`s.
pub struct CopilotResponsesEventMapper {
    // Stop reason already emitted mid-stream (e.g. ToolUse), used to avoid
    // emitting a duplicate Stop event when the response completes.
    pending_stop_reason: Option<StopReason>,
}
 695
 696impl CopilotResponsesEventMapper {
    /// Creates a mapper with no stop reason recorded yet.
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }
 702
    /// Consumes `self` and adapts the raw Responses-API event stream.
    /// Each upstream event may expand into zero or more completion events
    /// (via `map_event`); transport errors are forwarded as
    /// `LanguageModelCompletionError`s.
    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        // `move` captures `self` so per-stream state (pending_stop_reason)
        // persists across events.
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }
 715
 716    fn map_event(
 717        &mut self,
 718        event: copilot_responses::StreamEvent,
 719    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
 720        match event {
 721            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
 722                copilot_responses::ResponseOutputItem::Message { id, .. } => {
 723                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
 724                        message_id: id,
 725                    })]
 726                }
 727                _ => Vec::new(),
 728            },
 729
 730            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
 731                if delta.is_empty() {
 732                    Vec::new()
 733                } else {
 734                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
 735                }
 736            }
 737
 738            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
 739                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
 740                copilot_responses::ResponseOutputItem::FunctionCall {
 741                    call_id,
 742                    name,
 743                    arguments,
 744                    thought_signature,
 745                    ..
 746                } => {
 747                    let mut events = Vec::new();
 748                    match parse_tool_arguments(&arguments) {
 749                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
 750                            LanguageModelToolUse {
 751                                id: call_id.into(),
 752                                name: name.as_str().into(),
 753                                is_input_complete: true,
 754                                input,
 755                                raw_input: arguments.clone(),
 756                                thought_signature,
 757                            },
 758                        ))),
 759                        Err(error) => {
 760                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
 761                                id: call_id.into(),
 762                                tool_name: name.as_str().into(),
 763                                raw_input: arguments.clone().into(),
 764                                json_parse_error: error.to_string(),
 765                            }))
 766                        }
 767                    }
 768                    // Record that we already emitted a tool-use stop so we can avoid duplicating
 769                    // a Stop event on Completed.
 770                    self.pending_stop_reason = Some(StopReason::ToolUse);
 771                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
 772                    events
 773                }
 774                copilot_responses::ResponseOutputItem::Reasoning {
 775                    summary,
 776                    encrypted_content,
 777                    ..
 778                } => {
 779                    let mut events = Vec::new();
 780
 781                    if let Some(blocks) = summary {
 782                        let mut text = String::new();
 783                        for block in blocks {
 784                            text.push_str(&block.text);
 785                        }
 786                        if !text.is_empty() {
 787                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
 788                                text,
 789                                signature: None,
 790                            }));
 791                        }
 792                    }
 793
 794                    if let Some(data) = encrypted_content {
 795                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
 796                    }
 797
 798                    events
 799                }
 800            },
 801
 802            copilot_responses::StreamEvent::Completed { response } => {
 803                let mut events = Vec::new();
 804                if let Some(usage) = response.usage {
 805                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
 806                        input_tokens: usage.input_tokens.unwrap_or(0),
 807                        output_tokens: usage.output_tokens.unwrap_or(0),
 808                        cache_creation_input_tokens: 0,
 809                        cache_read_input_tokens: 0,
 810                    })));
 811                }
 812                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
 813                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
 814                }
 815                events
 816            }
 817
 818            copilot_responses::StreamEvent::Incomplete { response } => {
 819                let reason = response
 820                    .incomplete_details
 821                    .as_ref()
 822                    .and_then(|details| details.reason.as_ref());
 823                let stop_reason = match reason {
 824                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
 825                        StopReason::MaxTokens
 826                    }
 827                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
 828                    _ => self
 829                        .pending_stop_reason
 830                        .take()
 831                        .unwrap_or(StopReason::EndTurn),
 832                };
 833
 834                let mut events = Vec::new();
 835                if let Some(usage) = response.usage {
 836                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
 837                        input_tokens: usage.input_tokens.unwrap_or(0),
 838                        output_tokens: usage.output_tokens.unwrap_or(0),
 839                        cache_creation_input_tokens: 0,
 840                        cache_read_input_tokens: 0,
 841                    })));
 842                }
 843                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
 844                events
 845            }
 846
 847            copilot_responses::StreamEvent::Failed { response } => {
 848                let provider = PROVIDER_NAME;
 849                let (status_code, message) = match response.error {
 850                    Some(error) => {
 851                        let status_code = StatusCode::from_str(&error.code)
 852                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
 853                        (status_code, error.message)
 854                    }
 855                    None => (
 856                        StatusCode::INTERNAL_SERVER_ERROR,
 857                        "response.failed".to_string(),
 858                    ),
 859                };
 860                vec![Err(LanguageModelCompletionError::HttpResponseError {
 861                    provider,
 862                    status_code,
 863                    message,
 864                })]
 865            }
 866
 867            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
 868                LanguageModelCompletionError::Other(anyhow!(error.message)),
 869            )],
 870
 871            copilot_responses::StreamEvent::Created { .. }
 872            | copilot_responses::StreamEvent::Unknown => Vec::new(),
 873        }
 874    }
 875}
 876
/// Converts a provider-agnostic `LanguageModelRequest` into the Copilot Chat
/// (Completions-style) wire request for `model`.
///
/// Adjacent messages sharing a role are merged, tool results become dedicated
/// `Tool` messages, and images are only forwarded when the model supports
/// vision. Returns an error only if tool-call arguments fail to serialize.
fn into_copilot_chat(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let temperature = request.temperature;
    let tool_choice = request.tool_choice;

    // Coalesce consecutive same-role messages so the transcript alternates
    // roles as the API expects.
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                // Emit tool results first, as `Tool` messages keyed by the
                // originating tool call id, before any user content.
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    // Upstream layers should have filtered
                                    // image results for non-vision models.
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                // Remaining text/image content becomes a single `User`
                // message; adjacent text parts are concatenated in place.
                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                // Collect the assistant's tool invocations; arguments are
                // re-serialized to JSON (the only fallible step here).
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: ToolCallContent::Function {
                                function: FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                // Only plain text is replayed as assistant content; thinking,
                // tool traffic, and images are intentionally excluded.
                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) => Some(text.as_str()),
                        MessageContent::Thinking { .. }
                        | MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        n: 1,
        stream: model.uses_streaming(),
        // NOTE(review): 0.1 appears to be this provider's default sampling
        // temperature when the request doesn't specify one — confirm.
        temperature: temperature.unwrap_or(0.1),
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => ToolChoice::Auto,
            LanguageModelToolChoice::Any => ToolChoice::Any,
            LanguageModelToolChoice::None => ToolChoice::None,
        }),
        thinking_budget: None,
    })
}
1055
/// Clamps the default thinking budget (16k tokens) into the window allowed
/// for the model: at least `min_budget` (default 1024), at most `max_budget`
/// (default: one below `max_output_tokens`), and never reaching
/// `max_output_tokens` itself. Always returns `Some`.
fn compute_thinking_budget(
    min_budget: Option<u32>,
    max_budget: Option<u32>,
    max_output_tokens: u32,
) -> Option<u32> {
    const DEFAULT_BUDGET: u32 = 16000;
    // The budget must stay strictly below the output-token limit.
    let hard_cap = max_output_tokens.saturating_sub(1);
    let floor = min_budget.unwrap_or(1024);
    let ceiling = max_budget.unwrap_or(hard_cap).min(hard_cap);
    Some(DEFAULT_BUDGET.max(floor).min(ceiling))
}
1071
1072fn intent_to_chat_location(intent: Option<CompletionIntent>) -> ChatLocation {
1073    match intent {
1074        Some(CompletionIntent::UserPrompt) => ChatLocation::Agent,
1075        Some(CompletionIntent::ToolResults) => ChatLocation::Agent,
1076        Some(CompletionIntent::ThreadSummarization) => ChatLocation::Panel,
1077        Some(CompletionIntent::ThreadContextSummarization) => ChatLocation::Panel,
1078        Some(CompletionIntent::CreateFile) => ChatLocation::Agent,
1079        Some(CompletionIntent::EditFile) => ChatLocation::Agent,
1080        Some(CompletionIntent::InlineAssist) => ChatLocation::Editor,
1081        Some(CompletionIntent::TerminalInlineAssist) => ChatLocation::Terminal,
1082        Some(CompletionIntent::GenerateGitCommitMessage) => ChatLocation::Other,
1083        None => ChatLocation::Panel,
1084    }
1085}
1086
/// Converts a provider-agnostic `LanguageModelRequest` into a Copilot
/// Responses-API request for `model`.
///
/// Per message, items are emitted in a fixed order the API expects: for user
/// messages, function-call outputs precede the user content; for assistant
/// messages, function calls precede replayed reasoning, which precedes the
/// assistant text. Reasoning is requested (with encrypted content included
/// for replay) only when `thinking_allowed` is set.
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    // Destructure exhaustively so new request fields must be handled (or
    // explicitly ignored) here.
    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed,
        thinking_effort: _,
        speed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                // Tool results become FunctionCallOutput items, emitted
                // before any user content.
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => {
                                responses::ResponseFunctionOutput::Text(text.to_string())
                            }
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    responses::ResponseFunctionOutput::Content(vec![
                                        responses::ResponseInputContent::InputImage {
                                            image_url: Some(image.to_base64_url()),
                                            detail: Default::default(),
                                        },
                                    ])
                                } else {
                                    // Upstream layers should have filtered
                                    // image results for non-vision models.
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                // Remaining text/image content becomes one user message;
                // images are dropped for models without vision support.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                // Replay the assistant's tool calls using the original raw
                // argument string (no re-serialization).
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                // Replay opaque reasoning payloads received on earlier turns.
                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                // Assistant text last; images can't be replayed as output, so
                // they are replaced with a placeholder marker.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                // System messages forward only their text content.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: if thinking_allowed {
            Some(copilot_responses::ReasoningConfig {
                effort: copilot_responses::ReasoningEffort::Medium,
                summary: Some(copilot_responses::ReasoningSummary::Detailed),
            })
        } else {
            None
        },
        // Ask the server to return encrypted reasoning so it can be replayed
        // on the next turn (see RedactedThinking handling above).
        include: Some(vec![
            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
        // We manage conversation state client-side; don't store server-side.
        store: false,
    }
}
1283
1284#[cfg(test)]
1285mod tests {
1286    use super::*;
1287    use copilot_chat::responses;
1288    use futures::StreamExt;
1289
1290    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
1291        futures::executor::block_on(async {
1292            CopilotResponsesEventMapper::new()
1293                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1294                .collect::<Vec<_>>()
1295                .await
1296                .into_iter()
1297                .map(Result::unwrap)
1298                .collect()
1299        })
1300    }
1301
    // A normal turn — message added, text streamed, response completed with
    // usage — should yield StartMessage, Text, UsageUpdate, then Stop(EndTurn)
    // in that order.
    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }
1353
1354    #[test]
1355    fn responses_stream_maps_tool_calls() {
1356        let events = vec![responses::StreamEvent::OutputItemDone {
1357            output_index: 0,
1358            sequence_number: None,
1359            item: responses::ResponseOutputItem::FunctionCall {
1360                id: Some("fn_1".into()),
1361                call_id: "call_1".into(),
1362                name: "do_it".into(),
1363                arguments: "{\"x\":1}".into(),
1364                status: None,
1365                thought_signature: None,
1366            },
1367        }];
1368
1369        let mapped = map_events(events);
1370        assert!(matches!(
1371            mapped[0],
1372            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
1373        ));
1374        assert!(matches!(
1375            mapped[1],
1376            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1377        ));
1378    }
1379
    // Malformed JSON in tool-call arguments should produce a
    // ToolUseJsonParseError (not a stream error) and still stop the turn
    // with Stop(ToolUse).
    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }
1406
    // A reasoning item with both a summary and encrypted content should yield
    // a Thinking event (concatenated summary text) followed by
    // RedactedThinking carrying the opaque payload.
    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }
1432
    // An Incomplete response attributed to the output-token limit should
    // report usage and then Stop(MaxTokens).
    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }
1463
1464    #[test]
1465    fn responses_stream_handles_incomplete_content_filter() {
1466        let events = vec![responses::StreamEvent::Incomplete {
1467            response: responses::Response {
1468                usage: None,
1469                incomplete_details: Some(responses::IncompleteDetails {
1470                    reason: Some(responses::IncompleteReason::ContentFilter),
1471                }),
1472                ..Default::default()
1473            },
1474        }];
1475
1476        let mapped = map_events(events);
1477        assert!(matches!(
1478            mapped.last().unwrap(),
1479            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
1480        ));
1481    }
1482
1483    #[test]
1484    fn responses_stream_completed_no_duplicate_after_tool_use() {
1485        let events = vec![
1486            responses::StreamEvent::OutputItemDone {
1487                output_index: 0,
1488                sequence_number: None,
1489                item: responses::ResponseOutputItem::FunctionCall {
1490                    id: Some("fn_1".into()),
1491                    call_id: "call_1".into(),
1492                    name: "do_it".into(),
1493                    arguments: "{}".into(),
1494                    status: None,
1495                    thought_signature: None,
1496                },
1497            },
1498            responses::StreamEvent::Completed {
1499                response: responses::Response::default(),
1500            },
1501        ];
1502
1503        let mapped = map_events(events);
1504
1505        let mut stop_count = 0usize;
1506        let mut saw_tool_use_stop = false;
1507        for event in mapped {
1508            if let LanguageModelCompletionEvent::Stop(reason) = event {
1509                stop_count += 1;
1510                if matches!(reason, StopReason::ToolUse) {
1511                    saw_tool_use_stop = true;
1512                }
1513            }
1514        }
1515        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
1516        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
1517    }
1518
1519    #[test]
1520    fn responses_stream_failed_maps_http_response_error() {
1521        let events = vec![responses::StreamEvent::Failed {
1522            response: responses::Response {
1523                error: Some(responses::ResponseError {
1524                    code: "429".into(),
1525                    message: "too many requests".into(),
1526                }),
1527                ..Default::default()
1528            },
1529        }];
1530
1531        let mapped_results = futures::executor::block_on(async {
1532            CopilotResponsesEventMapper::new()
1533                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1534                .collect::<Vec<_>>()
1535                .await
1536        });
1537
1538        assert_eq!(mapped_results.len(), 1);
1539        match &mapped_results[0] {
1540            Err(LanguageModelCompletionError::HttpResponseError {
1541                status_code,
1542                message,
1543                ..
1544            }) => {
1545                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
1546                assert_eq!(message, "too many requests");
1547            }
1548            other => panic!("expected HttpResponseError, got {:?}", other),
1549        }
1550    }
1551
1552    #[test]
1553    fn chat_completions_stream_maps_reasoning_data() {
1554        use copilot_chat::{
1555            FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
1556        };
1557
1558        let events = vec![
1559            ResponseEvent {
1560                choices: vec![ResponseChoice {
1561                    index: Some(0),
1562                    finish_reason: None,
1563                    delta: Some(ResponseDelta {
1564                        content: None,
1565                        role: Some(Role::Assistant),
1566                        tool_calls: vec![ToolCallChunk {
1567                            index: Some(0),
1568                            id: Some("call_abc123".to_string()),
1569                            function: Some(FunctionChunk {
1570                                name: Some("list_directory".to_string()),
1571                                arguments: Some("{\"path\":\"test\"}".to_string()),
1572                                thought_signature: None,
1573                            }),
1574                        }],
1575                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
1576                        reasoning_text: Some("Let me check the directory".to_string()),
1577                    }),
1578                    message: None,
1579                }],
1580                id: "chatcmpl-123".to_string(),
1581                usage: None,
1582            },
1583            ResponseEvent {
1584                choices: vec![ResponseChoice {
1585                    index: Some(0),
1586                    finish_reason: Some("tool_calls".to_string()),
1587                    delta: Some(ResponseDelta {
1588                        content: None,
1589                        role: None,
1590                        tool_calls: vec![],
1591                        reasoning_opaque: None,
1592                        reasoning_text: None,
1593                    }),
1594                    message: None,
1595                }],
1596                id: "chatcmpl-123".to_string(),
1597                usage: None,
1598            },
1599        ];
1600
1601        let mapped = futures::executor::block_on(async {
1602            map_to_language_model_completion_events(
1603                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
1604                true,
1605            )
1606            .collect::<Vec<_>>()
1607            .await
1608        });
1609
1610        let mut has_reasoning_details = false;
1611        let mut has_tool_use = false;
1612        let mut reasoning_opaque_value: Option<String> = None;
1613        let mut reasoning_text_value: Option<String> = None;
1614
1615        for event_result in mapped {
1616            match event_result {
1617                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
1618                    has_reasoning_details = true;
1619                    reasoning_opaque_value = details
1620                        .get("reasoning_opaque")
1621                        .and_then(|v| v.as_str())
1622                        .map(|s| s.to_string());
1623                    reasoning_text_value = details
1624                        .get("reasoning_text")
1625                        .and_then(|v| v.as_str())
1626                        .map(|s| s.to_string());
1627                }
1628                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
1629                    has_tool_use = true;
1630                    assert_eq!(tool_use.id.to_string(), "call_abc123");
1631                    assert_eq!(tool_use.name.as_ref(), "list_directory");
1632                }
1633                _ => {}
1634            }
1635        }
1636
1637        assert!(
1638            has_reasoning_details,
1639            "Should emit ReasoningDetails event for Gemini 3 reasoning"
1640        );
1641        assert!(has_tool_use, "Should emit ToolUse event");
1642        assert_eq!(
1643            reasoning_opaque_value,
1644            Some("encrypted_reasoning_token_xyz".to_string()),
1645            "Should capture reasoning_opaque"
1646        );
1647        assert_eq!(
1648            reasoning_text_value,
1649            Some("Let me check the directory".to_string()),
1650            "Should capture reasoning_text"
1651        );
1652    }
1653}