1use std::pin::Pin;
2use std::str::FromStr as _;
3use std::sync::Arc;
4
5use anyhow::{Result, anyhow};
6use cloud_llm_client::CompletionIntent;
7use collections::HashMap;
8use copilot::{GlobalCopilotAuth, Status};
9use copilot_chat::responses as copilot_responses;
10use copilot_chat::{
11 ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, CopilotChatConfiguration,
12 Function, FunctionContent, ImageUrl, Model as CopilotChatModel, ModelVendor,
13 Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent, ToolChoice,
14};
15use futures::future::BoxFuture;
16use futures::stream::BoxStream;
17use futures::{FutureExt, Stream, StreamExt};
18use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
19use http_client::StatusCode;
20use language::language_settings::all_language_settings;
21use language_model::{
22 AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCompletionError,
23 LanguageModelCompletionEvent, LanguageModelCostInfo, LanguageModelId, LanguageModelName,
24 LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
25 LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
26 LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
27 LanguageModelToolUse, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
28};
29use settings::SettingsStore;
30use ui::prelude::*;
31use util::debug_panic;
32
33use crate::provider::util::parse_tool_arguments;
34
/// Stable identifier for this provider (used in settings, telemetry, and model lookup).
const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
/// Human-readable provider name shown in the UI.
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");
38
/// Language-model provider backed by GitHub Copilot Chat.
pub struct CopilotChatLanguageModelProvider {
    // Observable state entity; notifies observers when Copilot Chat or settings change.
    state: Entity<State>,
}
42
/// Observable provider state. The fields only exist to keep the subscriptions
/// alive; dropping them would stop change notifications.
pub struct State {
    // Re-notifies observers whenever the global `CopilotChat` entity changes.
    // `None` when no global CopilotChat instance existed at construction time.
    _copilot_chat_subscription: Option<Subscription>,
    // Pushes updated configuration (e.g. the enterprise URI) into `CopilotChat`
    // whenever the settings store changes.
    _settings_subscription: Subscription,
}
47
48impl State {
49 fn is_authenticated(&self, cx: &App) -> bool {
50 CopilotChat::global(cx)
51 .map(|m| m.read(cx).is_authenticated())
52 .unwrap_or(false)
53 }
54}
55
impl CopilotChatLanguageModelProvider {
    /// Creates the provider, wiring up observation of the global `CopilotChat`
    /// entity (when one exists) and of the settings store so configuration
    /// changes are forwarded to Copilot Chat.
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            // Forward change notifications from the global CopilotChat entity
            // to observers of this provider's state.
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    // On settings changes, propagate the (possibly updated)
                    // Copilot enterprise URI into the CopilotChat configuration.
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    /// Wraps a Copilot Chat model in a `LanguageModel` trait object with a
    /// rate limiter allowing up to 4 concurrent requests.
    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}
92
93impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
94 type ObservableEntity = State;
95
96 fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
97 Some(self.state.clone())
98 }
99}
100
101impl LanguageModelProvider for CopilotChatLanguageModelProvider {
102 fn id(&self) -> LanguageModelProviderId {
103 PROVIDER_ID
104 }
105
106 fn name(&self) -> LanguageModelProviderName {
107 PROVIDER_NAME
108 }
109
110 fn icon(&self) -> IconOrSvg {
111 IconOrSvg::Icon(IconName::Copilot)
112 }
113
114 fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
115 let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
116 models
117 .first()
118 .map(|model| self.create_language_model(model.clone()))
119 }
120
121 fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
122 // The default model should be Copilot Chat's 'base model', which is likely a relatively fast
123 // model (e.g. 4o) and a sensible choice when considering premium requests
124 self.default_model(cx)
125 }
126
127 fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
128 let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
129 return Vec::new();
130 };
131 models
132 .iter()
133 .map(|model| self.create_language_model(model.clone()))
134 .collect()
135 }
136
137 fn is_authenticated(&self, cx: &App) -> bool {
138 self.state.read(cx).is_authenticated(cx)
139 }
140
141 fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
142 if self.is_authenticated(cx) {
143 return Task::ready(Ok(()));
144 };
145
146 let Some(copilot) = GlobalCopilotAuth::try_global(cx).cloned() else {
147 return Task::ready(Err(anyhow!(concat!(
148 "Copilot must be enabled for Copilot Chat to work. ",
149 "Please enable Copilot and try again."
150 ))
151 .into()));
152 };
153
154 let err = match copilot.0.read(cx).status() {
155 Status::Authorized => return Task::ready(Ok(())),
156 Status::Disabled => anyhow!(
157 "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
158 ),
159 Status::Error(err) => anyhow!(format!(
160 "Received the following error while signing into Copilot: {err}"
161 )),
162 Status::Starting { task: _ } => anyhow!(
163 "Copilot is still starting, please wait for Copilot to start then try again"
164 ),
165 Status::Unauthorized => anyhow!(
166 "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
167 ),
168 Status::SignedOut { .. } => {
169 anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
170 }
171 Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
172 };
173
174 Task::ready(Err(err.into()))
175 }
176
177 fn configuration_view(
178 &self,
179 _target_agent: language_model::ConfigurationViewTargetAgent,
180 _: &mut Window,
181 cx: &mut App,
182 ) -> AnyView {
183 cx.new(|cx| {
184 copilot_ui::ConfigurationView::new(
185 |cx| {
186 CopilotChat::global(cx)
187 .map(|m| m.read(cx).is_authenticated())
188 .unwrap_or(false)
189 },
190 copilot_ui::ConfigurationMode::Chat,
191 cx,
192 )
193 })
194 .into()
195 }
196
197 fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
198 Task::ready(Err(anyhow!(
199 "Signing out of GitHub Copilot Chat is currently not supported."
200 )))
201 }
202}
203
204fn collect_tiktoken_messages(
205 request: LanguageModelRequest,
206) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
207 request
208 .messages
209 .into_iter()
210 .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
211 role: match message.role {
212 Role::User => "user".into(),
213 Role::Assistant => "assistant".into(),
214 Role::System => "system".into(),
215 },
216 content: Some(message.string_contents()),
217 name: None,
218 function_call: None,
219 })
220 .collect::<Vec<_>>()
221}
222
/// A single Copilot Chat model exposed through the `LanguageModel` trait.
pub struct CopilotChatLanguageModel {
    // Metadata and capabilities of the underlying Copilot Chat model.
    model: CopilotChatModel,
    // Limits concurrent requests issued against this model.
    request_limiter: RateLimiter,
}
227
impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_streaming_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    /// Tool schema format depends on the model's upstream vendor: OpenAI and
    /// Anthropic accept full JSON Schema, while the others are restricted to a
    /// subset.
    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    /// All tool-choice modes are supported whenever tools are supported at all.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    /// Copilot bills per request via a model-specific multiplier rather than
    /// per token.
    fn model_cost_info(&self) -> Option<LanguageModelCostInfo> {
        LanguageModelCostInfo::RequestCost {
            cost_per_request: self.model.multiplier(),
        }
        .into()
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    /// Estimates token usage on a background thread with `tiktoken_rs`.
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all its models,
            // irrespective of the underlying provider (vendor). Map the named
            // tokenizer to a model tiktoken_rs knows; default to gpt-4o.
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    /// Streams a completion, dispatching to the Responses API when the model
    /// supports it and falling back to the Chat Completions API otherwise.
    /// Both paths go through the per-model rate limiter.
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        // Classify the request for Copilot's user-initiated accounting. A
        // missing intent is treated as user-initiated.
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        if self.model.supports_response() {
            // Responses API path.
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        // Chat Completions path.
        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}
376
/// Maps a Chat Completions event stream into `LanguageModelCompletionEvent`s.
///
/// Tool calls may arrive incrementally across deltas, so they are accumulated
/// per index and finalized when the `tool_calls` finish reason arrives. When
/// `is_streaming` is false, the full message is read from `choice.message`
/// instead of `choice.delta`.
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    // A tool call being assembled from streamed fragments.
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        // Argument JSON accumulated across deltas; may be incomplete mid-stream.
        arguments: String,
        thought_signature: Option<String>,
    }

    // State threaded through `unfold` across stream items.
    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        // Reasoning data captured from deltas (e.g. for Gemini 3 models).
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        // Streaming responses deliver increments in `delta`;
                        // non-streaming responses put everything in `message`.
                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3)
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        // Merge tool-call fragments into the per-index accumulators.
                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            // Fall back to the positional index when none is given.
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }

                            // Once id and name are known, surface a partial tool use
                            // whenever the accumulated arguments can be repaired into
                            // valid JSON; silently skip when they can't yet.
                            if !entry.id.is_empty() && !entry.name.is_empty() {
                                if let Ok(input) = serde_json::from_str::<serde_json::Value>(
                                    &partial_json_fixer::fix_json(&entry.arguments),
                                ) {
                                    events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                        LanguageModelToolUse {
                                            id: entry.id.clone().into(),
                                            name: entry.name.as_str().into(),
                                            is_input_complete: false,
                                            input,
                                            raw_input: entry.arguments.clone(),
                                            thought_signature: entry.thought_signature.clone(),
                                        },
                                    )));
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                // Finalize every accumulated tool call. Arguments
                                // that still fail to parse are reported as parse
                                // errors rather than dropped.
                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| match parse_tool_arguments(
                                        &tool_call.arguments,
                                    ) {
                                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                            LanguageModelToolUse {
                                                id: tool_call.id.into(),
                                                name: tool_call.name.as_str().into(),
                                                is_input_complete: true,
                                                input,
                                                raw_input: tool_call.arguments,
                                                thought_signature: tool_call.thought_signature,
                                            },
                                        )),
                                        Err(error) => Ok(
                                            LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                id: tool_call.id.into(),
                                                tool_name: tool_call.name.as_str().into(),
                                                raw_input: tool_call.arguments.into(),
                                                json_parse_error: error.to_string(),
                                            },
                                        ),
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                // Unknown finish reasons are logged and treated as
                                // a normal end of turn.
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    // Each upstream event may expand to several completion events.
    .flat_map(futures::stream::iter)
}
574
/// Maps Copilot Responses-API stream events to `LanguageModelCompletionEvent`s.
pub struct CopilotResponsesEventMapper {
    // Records a Stop already emitted mid-stream (e.g. for a tool call) so the
    // final `Completed` event doesn't emit a duplicate Stop.
    pending_stop_reason: Option<StopReason>,
}
578
impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    /// Consumes the mapper and converts a Responses-API event stream into a
    /// stream of completion events; stream-level errors become single `Err` items.
    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    /// Maps one Responses-API event to zero or more completion events,
    /// updating `pending_stop_reason` as needed.
    fn map_event(
        &mut self,
        event: copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            // A new output message begins: announce its id.
            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            // Incremental text; empty deltas are dropped.
            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                // A completed function call: parse its arguments and emit a
                // tool use (or a parse error), then stop for tool use.
                copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match parse_tool_arguments(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                // Reasoning output: summary blocks become Thinking text, and
                // encrypted content becomes RedactedThinking.
                copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            // Normal completion: report usage, then Stop unless a tool-use
            // Stop was already emitted for this response.
            copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            // Truncated response: translate the incomplete reason into a stop
            // reason, report usage, and stop.
            copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            // Failed response: surface as an HTTP-style error, parsing the
            // provider's error code as a status code when possible.
            copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(error.message)),
            )],

            // Bookkeeping / unrecognized events produce nothing.
            copilot_responses::StreamEvent::Created { .. }
            | copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}
759
/// Converts a `LanguageModelRequest` into a Copilot Chat Completions request.
///
/// Consecutive messages with the same role are merged first, then each message
/// is translated: tool results become `Tool` messages, user content becomes
/// (possibly multipart) `User` messages, assistant content becomes `Assistant`
/// messages carrying tool calls and any preserved reasoning data, and system
/// content is flattened to a plain string.
///
/// Errors only when serializing a tool call's input to JSON fails.
fn into_copilot_chat(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    // Merge runs of same-role messages into one message each.
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                // Tool results are emitted first, as separate `Tool` messages.
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    // Image tool results should have been filtered
                                    // out upstream for non-vision models.
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                // Remaining text/thinking/image content becomes one user message.
                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            // Coalesce adjacent text into a single part.
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                // Collect tool calls made by the assistant.
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: ToolCallContent::Function {
                                function: FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                // Flatten text and thinking content into one string.
                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => ToolChoice::Auto,
            LanguageModelToolChoice::Any => ToolChoice::Any,
            LanguageModelToolChoice::None => ToolChoice::None,
        }),
    })
}
936
/// Converts a `LanguageModelRequest` into a Copilot Responses-API request.
///
/// Per message: tool results become `FunctionCallOutput` items (preferring the
/// structured `output` value when present), user/system content becomes input
/// message items, and assistant content is split into `FunctionCall`,
/// `Reasoning` (redacted thinking), and output-text message items. Encrypted
/// reasoning content is requested back via `include` so it can be replayed in
/// subsequent turns.
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    // Destructure explicitly so unsupported fields are visibly discarded.
    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
        thinking_effort: _,
        speed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                // Tool results first, as function-call outputs.
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        // Prefer the structured `output` value when the tool
                        // provided one; otherwise fall back to the content.
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        // Image tool results should have been
                                        // filtered upstream for non-vision models.
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                // Then the user's text and (for vision models) image parts.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                // Tool calls the assistant made, replayed verbatim.
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                // Previously captured encrypted reasoning, replayed as-is.
                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                // Assistant text output; images are replaced with a placeholder.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                // System messages carry only their text content.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // We would need to add support for setting from user settings.
        // Ask the API to return encrypted reasoning so it can be preserved
        // across turns.
        include: Some(vec![
            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}
1137
// Unit tests for the two Copilot Chat stream mappers: the Responses-API
// mapper (`CopilotResponsesEventMapper`) and the Chat Completions mapper
// (`map_to_language_model_completion_events`), both defined earlier in this
// file.
#[cfg(test)]
mod tests {
    use super::*;
    use copilot_chat::responses;
    use futures::StreamExt;

    /// Drives the given Responses-API stream events through a fresh
    /// `CopilotResponsesEventMapper` on a blocking executor and returns the
    /// mapped completion events. Panics (via `Result::unwrap`) if the mapper
    /// yields any `Err`, so tests expecting mapped errors must not use this
    /// helper (see `responses_stream_failed_maps_http_response_error`).
    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    /// An added assistant message item, a text delta, and a `Completed` event
    /// carrying usage must map — in order — to `StartMessage`, `Text`,
    /// `UsageUpdate`, and `Stop(EndTurn)`.
    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    /// A completed function-call item with valid JSON arguments must map to a
    /// `ToolUse` event (carrying the `call_id` and tool name) followed by
    /// `Stop(ToolUse)`.
    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    /// Malformed JSON in a function call's arguments must surface as a
    /// `ToolUseJsonParseError` event (not a stream error), still followed by
    /// `Stop(ToolUse)`.
    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
            if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    /// A reasoning item carrying both a summary and encrypted content must
    /// produce a `Thinking` event (summary text, no signature) and a
    /// `RedactedThinking` event (the opaque encrypted payload).
    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

    /// An `Incomplete` event whose reason is `MaxOutputTokens` must still
    /// forward usage, then stop with `StopReason::MaxTokens`.
    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    /// An `Incomplete` event whose reason is `ContentFilter` must end the
    /// stream with `StopReason::Refusal`. Only the final event is pinned here
    /// since no usage is supplied.
    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    /// When a tool call already produced `Stop(ToolUse)`, a subsequent
    /// `Completed` event must NOT emit a second Stop — exactly one Stop
    /// event, with reason `ToolUse`, is expected for the whole stream.
    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        // Count Stop events rather than pinning indices: only their
        // uniqueness and reason matter for this regression test.
        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    /// A `Failed` event with a numeric error code must map to an `Err` of
    /// `HttpResponseError`, parsing the code string ("429") into the matching
    /// `StatusCode` and preserving the error message. Uses `map_stream`
    /// directly (not `map_events`) because the helper unwraps all results.
    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }

    /// Chat Completions path: a delta carrying `reasoning_opaque` and
    /// `reasoning_text` alongside a tool call must yield a
    /// `ReasoningDetails` event exposing both values under the
    /// "reasoning_opaque"/"reasoning_text" keys, plus the `ToolUse` event.
    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot_chat::{
            FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
        };

        // First event: assistant delta with a complete tool call and the
        // reasoning fields set; second event: the "tool_calls" finish reason.
        let events = vec![
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(ResponseDelta {
                        content: None,
                        role: Some(Role::Assistant),
                        tool_calls: vec![ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        // Scan all events: order is not pinned here, only presence and
        // payloads of the ReasoningDetails and ToolUse events.
        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
}