1use std::pin::Pin;
2use std::str::FromStr as _;
3use std::sync::Arc;
4
5use anyhow::{Result, anyhow};
6use cloud_llm_client::CompletionIntent;
7use collections::HashMap;
8use copilot::{GlobalCopilotAuth, Status};
9use copilot_chat::responses as copilot_responses;
10use copilot_chat::{
11 ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, CopilotChatConfiguration,
12 Function, FunctionContent, ImageUrl, Model as CopilotChatModel, ModelVendor,
13 Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent, ToolChoice,
14};
15use futures::future::BoxFuture;
16use futures::stream::BoxStream;
17use futures::{FutureExt, Stream, StreamExt};
18use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
19use http_client::StatusCode;
20use language::language_settings::all_language_settings;
21use language_model::{
22 AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCompletionError,
23 LanguageModelCompletionEvent, LanguageModelCostInfo, LanguageModelId, LanguageModelName,
24 LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
25 LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
26 LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
27 LanguageModelToolUse, MessageContent, RateLimiter, Role, StopReason, TokenUsage,
28};
29use settings::SettingsStore;
30use ui::prelude::*;
31use util::debug_panic;
32
33use crate::provider::util::parse_tool_arguments;
34
// Stable identifier used to register this provider with the language model registry.
const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
// Human-readable provider name shown in the UI.
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");
38
/// Language model provider backed by GitHub Copilot Chat.
pub struct CopilotChatLanguageModelProvider {
    // Observable state; it re-notifies whenever the global `CopilotChat`
    // entity or the settings store changes (see `new`).
    state: Entity<State>,
}
42
/// Observable provider state.
///
/// The fields are never read; they exist only to tie the lifetimes of the
/// change subscriptions to this entity so notifications keep flowing.
pub struct State {
    // `None` when no global `CopilotChat` entity existed at construction time.
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}
47
48impl State {
49 fn is_authenticated(&self, cx: &App) -> bool {
50 CopilotChat::global(cx)
51 .map(|m| m.read(cx).is_authenticated())
52 .unwrap_or(false)
53 }
54}
55
56impl CopilotChatLanguageModelProvider {
57 pub fn new(cx: &mut App) -> Self {
58 let state = cx.new(|cx| {
59 let copilot_chat_subscription = CopilotChat::global(cx)
60 .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
61 State {
62 _copilot_chat_subscription: copilot_chat_subscription,
63 _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
64 if let Some(copilot_chat) = CopilotChat::global(cx) {
65 let language_settings = all_language_settings(None, cx);
66 let configuration = CopilotChatConfiguration {
67 enterprise_uri: language_settings
68 .edit_predictions
69 .copilot
70 .enterprise_uri
71 .clone(),
72 };
73 copilot_chat.update(cx, |chat, cx| {
74 chat.set_configuration(configuration, cx);
75 });
76 }
77 cx.notify();
78 }),
79 }
80 });
81
82 Self { state }
83 }
84
85 fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
86 Arc::new(CopilotChatLanguageModel {
87 model,
88 request_limiter: RateLimiter::new(4),
89 })
90 }
91}
92
impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    // Exposes the state entity so callers can observe provider changes
    // (authentication status, model list updates).
    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}
100
impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::Copilot)
    }

    /// Returns the first model advertised by the global `CopilotChat` entity,
    /// or `None` when Copilot Chat is unavailable or has no model list yet.
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's 'base model', which is likely a relatively fast
        // model (e.g. 4o) and a sensible choice when considering premium requests
        self.default_model(cx)
    }

    /// All models currently advertised by Copilot Chat; empty when the global
    /// entity is missing or its model list hasn't been fetched.
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    /// Copilot Chat rides on the main Copilot sign-in: this never starts a
    /// sign-in flow itself, it only reports the current auth status as an
    /// immediately-ready task, mapping each non-authorized Copilot status to
    /// a user-facing error.
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        };

        let Some(copilot) = GlobalCopilotAuth::try_global(cx).cloned() else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.0.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(format!(
                "Received the following error while signing into Copilot: {err}"
            )),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting, please wait for Copilot to start then try again"
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    /// Builds the Copilot configuration view (chat mode) for the settings UI.
    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| {
            copilot_ui::ConfigurationView::new(
                |cx| {
                    CopilotChat::global(cx)
                        .map(|m| m.read(cx).is_authenticated())
                        .unwrap_or(false)
                },
                copilot_ui::ConfigurationMode::Chat,
                cx,
            )
        })
        .into()
    }

    // Sign-out is handled by the main Copilot integration, not per-provider.
    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}
203
204fn collect_tiktoken_messages(
205 request: LanguageModelRequest,
206) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
207 request
208 .messages
209 .into_iter()
210 .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
211 role: match message.role {
212 Role::User => "user".into(),
213 Role::Assistant => "assistant".into(),
214 Role::System => "system".into(),
215 },
216 content: Some(message.string_contents()),
217 name: None,
218 function_call: None,
219 })
220 .collect::<Vec<_>>()
221}
222
/// A single Copilot Chat model exposed through the `LanguageModel` trait.
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    // Caps the number of concurrent requests issued through this model.
    request_limiter: RateLimiter,
}
227
impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    // OpenAI and Anthropic models accept full JSON Schema tool definitions;
    // the remaining vendors only handle a subset.
    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    // Every tool-choice mode is available whenever the model supports tools.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    // Copilot prices per request; the model's multiplier is the
    // premium-request cost factor.
    fn model_cost_info(&self) -> Option<LanguageModelCostInfo> {
        LanguageModelCostInfo::RequestCost {
            cost_per_request: self.model.multiplier(),
        }
        .into()
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    /// Estimates the request's token count with tiktoken on a background task.
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses OpenAI tiktoken tokenizers for all of its models,
            // irrespective of the underlying provider (vendor). Map the
            // advertised tokenizer name to a model tiktoken-rs recognizes,
            // defaulting to the gpt-4o (o200k) encoding.
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    /// Streams a completion, dispatching to the Responses API for models that
    /// support it and to the Chat Completions API otherwise. Requests are
    /// gated by the per-model rate limiter.
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        // Classify the request as user-initiated or automatic based on its
        // intent; the flag is forwarded with the request to Copilot. A missing
        // intent is treated as user-initiated.
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        if self.model.supports_response() {
            // Responses API path.
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        // Chat Completions API path.
        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}
372
373pub fn map_to_language_model_completion_events(
374 events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
375 is_streaming: bool,
376) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
377 #[derive(Default)]
378 struct RawToolCall {
379 id: String,
380 name: String,
381 arguments: String,
382 thought_signature: Option<String>,
383 }
384
385 struct State {
386 events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
387 tool_calls_by_index: HashMap<usize, RawToolCall>,
388 reasoning_opaque: Option<String>,
389 reasoning_text: Option<String>,
390 }
391
392 futures::stream::unfold(
393 State {
394 events,
395 tool_calls_by_index: HashMap::default(),
396 reasoning_opaque: None,
397 reasoning_text: None,
398 },
399 move |mut state| async move {
400 if let Some(event) = state.events.next().await {
401 match event {
402 Ok(event) => {
403 let Some(choice) = event.choices.first() else {
404 return Some((
405 vec![Err(anyhow!("Response contained no choices").into())],
406 state,
407 ));
408 };
409
410 let delta = if is_streaming {
411 choice.delta.as_ref()
412 } else {
413 choice.message.as_ref()
414 };
415
416 let Some(delta) = delta else {
417 return Some((
418 vec![Err(anyhow!("Response contained no delta").into())],
419 state,
420 ));
421 };
422
423 let mut events = Vec::new();
424 if let Some(content) = delta.content.clone() {
425 events.push(Ok(LanguageModelCompletionEvent::Text(content)));
426 }
427
428 // Capture reasoning data from the delta (e.g. for Gemini 3)
429 if let Some(opaque) = delta.reasoning_opaque.clone() {
430 state.reasoning_opaque = Some(opaque);
431 }
432 if let Some(text) = delta.reasoning_text.clone() {
433 state.reasoning_text = Some(text);
434 }
435
436 for (index, tool_call) in delta.tool_calls.iter().enumerate() {
437 let tool_index = tool_call.index.unwrap_or(index);
438 let entry = state.tool_calls_by_index.entry(tool_index).or_default();
439
440 if let Some(tool_id) = tool_call.id.clone() {
441 entry.id = tool_id;
442 }
443
444 if let Some(function) = tool_call.function.as_ref() {
445 if let Some(name) = function.name.clone() {
446 entry.name = name;
447 }
448
449 if let Some(arguments) = function.arguments.clone() {
450 entry.arguments.push_str(&arguments);
451 }
452
453 if let Some(thought_signature) = function.thought_signature.clone()
454 {
455 entry.thought_signature = Some(thought_signature);
456 }
457 }
458 }
459
460 if let Some(usage) = event.usage {
461 events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
462 TokenUsage {
463 input_tokens: usage.prompt_tokens,
464 output_tokens: usage.completion_tokens,
465 cache_creation_input_tokens: 0,
466 cache_read_input_tokens: 0,
467 },
468 )));
469 }
470
471 match choice.finish_reason.as_deref() {
472 Some("stop") => {
473 events.push(Ok(LanguageModelCompletionEvent::Stop(
474 StopReason::EndTurn,
475 )));
476 }
477 Some("tool_calls") => {
478 // Gemini 3 models send reasoning_opaque/reasoning_text that must
479 // be preserved and sent back in subsequent requests. Emit as
480 // ReasoningDetails so the agent stores it in the message.
481 if state.reasoning_opaque.is_some()
482 || state.reasoning_text.is_some()
483 {
484 let mut details = serde_json::Map::new();
485 if let Some(opaque) = state.reasoning_opaque.take() {
486 details.insert(
487 "reasoning_opaque".to_string(),
488 serde_json::Value::String(opaque),
489 );
490 }
491 if let Some(text) = state.reasoning_text.take() {
492 details.insert(
493 "reasoning_text".to_string(),
494 serde_json::Value::String(text),
495 );
496 }
497 events.push(Ok(
498 LanguageModelCompletionEvent::ReasoningDetails(
499 serde_json::Value::Object(details),
500 ),
501 ));
502 }
503
504 events.extend(state.tool_calls_by_index.drain().map(
505 |(_, tool_call)| match parse_tool_arguments(
506 &tool_call.arguments,
507 ) {
508 Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
509 LanguageModelToolUse {
510 id: tool_call.id.into(),
511 name: tool_call.name.as_str().into(),
512 is_input_complete: true,
513 input,
514 raw_input: tool_call.arguments,
515 thought_signature: tool_call.thought_signature,
516 },
517 )),
518 Err(error) => Ok(
519 LanguageModelCompletionEvent::ToolUseJsonParseError {
520 id: tool_call.id.into(),
521 tool_name: tool_call.name.as_str().into(),
522 raw_input: tool_call.arguments.into(),
523 json_parse_error: error.to_string(),
524 },
525 ),
526 },
527 ));
528
529 events.push(Ok(LanguageModelCompletionEvent::Stop(
530 StopReason::ToolUse,
531 )));
532 }
533 Some(stop_reason) => {
534 log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
535 events.push(Ok(LanguageModelCompletionEvent::Stop(
536 StopReason::EndTurn,
537 )));
538 }
539 None => {}
540 }
541
542 return Some((events, state));
543 }
544 Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
545 }
546 }
547
548 None
549 },
550 )
551 .flat_map(futures::stream::iter)
552}
553
/// Maps Copilot Responses API stream events into `LanguageModelCompletionEvent`s.
pub struct CopilotResponsesEventMapper {
    // Set when a Stop event was already emitted (e.g. for a tool call), so the
    // terminal Completed/Incomplete event doesn't emit a duplicate Stop.
    pending_stop_reason: Option<StopReason>,
}
557
impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    /// Maps a raw Responses API event stream, flattening each incoming event
    /// into zero or more completion events.
    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    /// Translates a single Responses API stream event into completion events.
    fn map_event(
        &mut self,
        event: copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            // A message item beginning marks the start of an assistant message.
            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            // Incremental assistant text.
            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                // Message text was already emitted via OutputTextDelta.
                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match parse_tool_arguments(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    // Concatenated summary blocks become a visible Thinking event.
                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    // Encrypted reasoning is passed through opaquely so it can be
                    // round-tripped in later requests.
                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            // Terminal success: emit usage, then EndTurn unless a tool-use stop
            // was already emitted for this response.
            copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            // Terminal but truncated: map the reported reason onto a stop reason,
            // falling back to any pending one.
            copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            // Terminal failure: surface the provider error as an HTTP-style error,
            // parsing the error code as a status code when possible.
            copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(format!("{error:?}"))),
            )],

            // Lifecycle/unrecognized events carry nothing to surface.
            copilot_responses::StreamEvent::Created { .. }
            | copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}
738
/// Builds a Chat Completions API request from a `LanguageModelRequest`.
///
/// Consecutive messages with the same role are merged, tool results become
/// dedicated `Tool` messages, assistant tool uses become `tool_calls`, and
/// image parts are only included for models with vision support. Fails if a
/// tool-use input cannot be serialized to JSON.
fn into_copilot_chat(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    // Coalesce runs of same-role messages into a single message.
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                // Tool results are emitted first, as separate `Tool` messages
                // keyed by the originating tool call id.
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                // Remaining text/thinking/image content becomes a multipart
                // user message; adjacent text parts are concatenated.
                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: ToolCallContent::Function {
                                function: FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                // Concatenate all text and thinking content into one string.
                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details
                // (stored earlier from the model's response) so they can be sent
                // back, as some models (e.g. Gemini 3) require.
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        // NOTE(review): temperature is fixed at 0.1 rather than taken from
        // `request.temperature` (the Responses path forwards it) — confirm
        // this is intentional.
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => ToolChoice::Auto,
            LanguageModelToolChoice::Any => ToolChoice::Any,
            LanguageModelToolChoice::None => ToolChoice::None,
        }),
    })
}
915
/// Builds a Responses API request from a `LanguageModelRequest`.
///
/// Tool results become `FunctionCallOutput` items, assistant tool uses become
/// `FunctionCall` items, redacted thinking is round-tripped as encrypted
/// `Reasoning` items, and image parts are only included for models with
/// vision support.
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    // Destructure so unused fields are acknowledged explicitly.
    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
        thinking_effort: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                // Tool results first, as function-call outputs. A structured
                // `output` value takes precedence over the rendered content.
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                // Remaining text/image content becomes a user message.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                // Prior tool uses are replayed as function-call items.
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                // Encrypted reasoning is round-tripped opaquely.
                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                // Prior assistant text; images are replaced with a placeholder
                // since assistant output can't carry image input.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // We would need to add support for setting from user settings.
        // Ask the API to return encrypted reasoning so it can be round-tripped.
        include: Some(vec![
            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}
1115
1116#[cfg(test)]
1117mod tests {
1118 use super::*;
1119 use copilot_chat::responses;
1120 use futures::StreamExt;
1121
    /// Runs the responses event mapper over `events`, panicking on any mapped
    /// error, and returns the resulting completion events.
    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }
1133
1134 #[test]
1135 fn responses_stream_maps_text_and_usage() {
1136 let events = vec![
1137 responses::StreamEvent::OutputItemAdded {
1138 output_index: 0,
1139 sequence_number: None,
1140 item: responses::ResponseOutputItem::Message {
1141 id: "msg_1".into(),
1142 role: "assistant".into(),
1143 content: Some(Vec::new()),
1144 },
1145 },
1146 responses::StreamEvent::OutputTextDelta {
1147 item_id: "msg_1".into(),
1148 output_index: 0,
1149 delta: "Hello".into(),
1150 },
1151 responses::StreamEvent::Completed {
1152 response: responses::Response {
1153 usage: Some(responses::ResponseUsage {
1154 input_tokens: Some(5),
1155 output_tokens: Some(3),
1156 total_tokens: Some(8),
1157 }),
1158 ..Default::default()
1159 },
1160 },
1161 ];
1162
1163 let mapped = map_events(events);
1164 assert!(matches!(
1165 mapped[0],
1166 LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
1167 ));
1168 assert!(matches!(
1169 mapped[1],
1170 LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
1171 ));
1172 assert!(matches!(
1173 mapped[2],
1174 LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1175 input_tokens: 5,
1176 output_tokens: 3,
1177 ..
1178 })
1179 ));
1180 assert!(matches!(
1181 mapped[3],
1182 LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
1183 ));
1184 }
1185
1186 #[test]
1187 fn responses_stream_maps_tool_calls() {
1188 let events = vec![responses::StreamEvent::OutputItemDone {
1189 output_index: 0,
1190 sequence_number: None,
1191 item: responses::ResponseOutputItem::FunctionCall {
1192 id: Some("fn_1".into()),
1193 call_id: "call_1".into(),
1194 name: "do_it".into(),
1195 arguments: "{\"x\":1}".into(),
1196 status: None,
1197 thought_signature: None,
1198 },
1199 }];
1200
1201 let mapped = map_events(events);
1202 assert!(matches!(
1203 mapped[0],
1204 LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
1205 ));
1206 assert!(matches!(
1207 mapped[1],
1208 LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1209 ));
1210 }
1211
1212 #[test]
1213 fn responses_stream_handles_json_parse_error() {
1214 let events = vec![responses::StreamEvent::OutputItemDone {
1215 output_index: 0,
1216 sequence_number: None,
1217 item: responses::ResponseOutputItem::FunctionCall {
1218 id: Some("fn_1".into()),
1219 call_id: "call_1".into(),
1220 name: "do_it".into(),
1221 arguments: "{not json}".into(),
1222 status: None,
1223 thought_signature: None,
1224 },
1225 }];
1226
1227 let mapped = map_events(events);
1228 assert!(matches!(
1229 mapped[0],
1230 LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
1231 if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
1232 ));
1233 assert!(matches!(
1234 mapped[1],
1235 LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1236 ));
1237 }
1238
1239 #[test]
1240 fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
1241 let events = vec![responses::StreamEvent::OutputItemDone {
1242 output_index: 0,
1243 sequence_number: None,
1244 item: responses::ResponseOutputItem::Reasoning {
1245 id: "r1".into(),
1246 summary: Some(vec![responses::ResponseReasoningItem {
1247 kind: "summary_text".into(),
1248 text: "Chain".into(),
1249 }]),
1250 encrypted_content: Some("ENC".into()),
1251 },
1252 }];
1253
1254 let mapped = map_events(events);
1255 assert!(matches!(
1256 mapped[0],
1257 LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
1258 ));
1259 assert!(matches!(
1260 mapped[1],
1261 LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
1262 ));
1263 }
1264
1265 #[test]
1266 fn responses_stream_handles_incomplete_max_tokens() {
1267 let events = vec![responses::StreamEvent::Incomplete {
1268 response: responses::Response {
1269 usage: Some(responses::ResponseUsage {
1270 input_tokens: Some(10),
1271 output_tokens: Some(0),
1272 total_tokens: Some(10),
1273 }),
1274 incomplete_details: Some(responses::IncompleteDetails {
1275 reason: Some(responses::IncompleteReason::MaxOutputTokens),
1276 }),
1277 ..Default::default()
1278 },
1279 }];
1280
1281 let mapped = map_events(events);
1282 assert!(matches!(
1283 mapped[0],
1284 LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1285 input_tokens: 10,
1286 output_tokens: 0,
1287 ..
1288 })
1289 ));
1290 assert!(matches!(
1291 mapped[1],
1292 LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
1293 ));
1294 }
1295
1296 #[test]
1297 fn responses_stream_handles_incomplete_content_filter() {
1298 let events = vec![responses::StreamEvent::Incomplete {
1299 response: responses::Response {
1300 usage: None,
1301 incomplete_details: Some(responses::IncompleteDetails {
1302 reason: Some(responses::IncompleteReason::ContentFilter),
1303 }),
1304 ..Default::default()
1305 },
1306 }];
1307
1308 let mapped = map_events(events);
1309 assert!(matches!(
1310 mapped.last().unwrap(),
1311 LanguageModelCompletionEvent::Stop(StopReason::Refusal)
1312 ));
1313 }
1314
1315 #[test]
1316 fn responses_stream_completed_no_duplicate_after_tool_use() {
1317 let events = vec![
1318 responses::StreamEvent::OutputItemDone {
1319 output_index: 0,
1320 sequence_number: None,
1321 item: responses::ResponseOutputItem::FunctionCall {
1322 id: Some("fn_1".into()),
1323 call_id: "call_1".into(),
1324 name: "do_it".into(),
1325 arguments: "{}".into(),
1326 status: None,
1327 thought_signature: None,
1328 },
1329 },
1330 responses::StreamEvent::Completed {
1331 response: responses::Response::default(),
1332 },
1333 ];
1334
1335 let mapped = map_events(events);
1336
1337 let mut stop_count = 0usize;
1338 let mut saw_tool_use_stop = false;
1339 for event in mapped {
1340 if let LanguageModelCompletionEvent::Stop(reason) = event {
1341 stop_count += 1;
1342 if matches!(reason, StopReason::ToolUse) {
1343 saw_tool_use_stop = true;
1344 }
1345 }
1346 }
1347 assert_eq!(stop_count, 1, "should emit exactly one Stop event");
1348 assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
1349 }
1350
1351 #[test]
1352 fn responses_stream_failed_maps_http_response_error() {
1353 let events = vec![responses::StreamEvent::Failed {
1354 response: responses::Response {
1355 error: Some(responses::ResponseError {
1356 code: "429".into(),
1357 message: "too many requests".into(),
1358 }),
1359 ..Default::default()
1360 },
1361 }];
1362
1363 let mapped_results = futures::executor::block_on(async {
1364 CopilotResponsesEventMapper::new()
1365 .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1366 .collect::<Vec<_>>()
1367 .await
1368 });
1369
1370 assert_eq!(mapped_results.len(), 1);
1371 match &mapped_results[0] {
1372 Err(LanguageModelCompletionError::HttpResponseError {
1373 status_code,
1374 message,
1375 ..
1376 }) => {
1377 assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
1378 assert_eq!(message, "too many requests");
1379 }
1380 other => panic!("expected HttpResponseError, got {:?}", other),
1381 }
1382 }
1383
1384 #[test]
1385 fn chat_completions_stream_maps_reasoning_data() {
1386 use copilot_chat::{
1387 FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
1388 };
1389
1390 let events = vec![
1391 ResponseEvent {
1392 choices: vec![ResponseChoice {
1393 index: Some(0),
1394 finish_reason: None,
1395 delta: Some(ResponseDelta {
1396 content: None,
1397 role: Some(Role::Assistant),
1398 tool_calls: vec![ToolCallChunk {
1399 index: Some(0),
1400 id: Some("call_abc123".to_string()),
1401 function: Some(FunctionChunk {
1402 name: Some("list_directory".to_string()),
1403 arguments: Some("{\"path\":\"test\"}".to_string()),
1404 thought_signature: None,
1405 }),
1406 }],
1407 reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
1408 reasoning_text: Some("Let me check the directory".to_string()),
1409 }),
1410 message: None,
1411 }],
1412 id: "chatcmpl-123".to_string(),
1413 usage: None,
1414 },
1415 ResponseEvent {
1416 choices: vec![ResponseChoice {
1417 index: Some(0),
1418 finish_reason: Some("tool_calls".to_string()),
1419 delta: Some(ResponseDelta {
1420 content: None,
1421 role: None,
1422 tool_calls: vec![],
1423 reasoning_opaque: None,
1424 reasoning_text: None,
1425 }),
1426 message: None,
1427 }],
1428 id: "chatcmpl-123".to_string(),
1429 usage: None,
1430 },
1431 ];
1432
1433 let mapped = futures::executor::block_on(async {
1434 map_to_language_model_completion_events(
1435 Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
1436 true,
1437 )
1438 .collect::<Vec<_>>()
1439 .await
1440 });
1441
1442 let mut has_reasoning_details = false;
1443 let mut has_tool_use = false;
1444 let mut reasoning_opaque_value: Option<String> = None;
1445 let mut reasoning_text_value: Option<String> = None;
1446
1447 for event_result in mapped {
1448 match event_result {
1449 Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
1450 has_reasoning_details = true;
1451 reasoning_opaque_value = details
1452 .get("reasoning_opaque")
1453 .and_then(|v| v.as_str())
1454 .map(|s| s.to_string());
1455 reasoning_text_value = details
1456 .get("reasoning_text")
1457 .and_then(|v| v.as_str())
1458 .map(|s| s.to_string());
1459 }
1460 Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
1461 has_tool_use = true;
1462 assert_eq!(tool_use.id.to_string(), "call_abc123");
1463 assert_eq!(tool_use.name.as_ref(), "list_directory");
1464 }
1465 _ => {}
1466 }
1467 }
1468
1469 assert!(
1470 has_reasoning_details,
1471 "Should emit ReasoningDetails event for Gemini 3 reasoning"
1472 );
1473 assert!(has_tool_use, "Should emit ToolUse event");
1474 assert_eq!(
1475 reasoning_opaque_value,
1476 Some("encrypted_reasoning_token_xyz".to_string()),
1477 "Should capture reasoning_opaque"
1478 );
1479 assert_eq!(
1480 reasoning_text_value,
1481 Some("Let me check the directory".to_string()),
1482 "Should capture reasoning_text"
1483 );
1484 }
1485}