use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anyhow::{Result, anyhow};
use cloud_llm_client::CompletionIntent;
use collections::HashMap;
use copilot::{Copilot, Status};
use copilot_chat::responses as copilot_responses;
use copilot_chat::{
    ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, CopilotChatConfiguration,
    Function, FunctionContent, ImageUrl, Model as CopilotChatModel, ModelVendor,
    Request as CopilotChatRequest, ResponseEvent, Tool, ToolCall, ToolCallContent, ToolChoice,
};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelId, LanguageModelName, LanguageModelProvider,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelRequest, LanguageModelRequestMessage, LanguageModelToolChoice,
    LanguageModelToolResultContent, LanguageModelToolSchemaFormat, LanguageModelToolUse,
    MessageContent, RateLimiter, Role, StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::prelude::*;
use util::debug_panic;

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

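/// Language model provider backed by GitHub Copilot Chat. Model discovery and
/// authentication state come from the global [`CopilotChat`] entity.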
pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

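/// Observable provider state. The held subscriptions keep the provider
/// notified of Copilot Chat and settings changes for as long as it lives.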
pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::Copilot)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's 'base model', which is likely a relatively
        // fast model (e.g. 4o) and a sensible choice when considering premium requests.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        }

        let Some(copilot) = Copilot::global(cx) else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(
                "Received the following error while signing into Copilot: {err}"
            ),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting. Please wait for Copilot to start, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| {
            copilot_ui::ConfigurationView::new(
                |cx| {
                    CopilotChat::global(cx)
                        .map(|m| m.read(cx).is_authenticated())
                        .unwrap_or(false)
                },
                copilot_ui::ConfigurationMode::Chat,
                cx,
            )
        })
        .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

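/// Flattens a [`LanguageModelRequest`] into the message format expected by
/// `tiktoken_rs` for token counting. Tool calls and images are not represented,
/// so the resulting count approximates the text content only.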
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

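/// A single Copilot Chat model, limited to four concurrent requests via its
/// [`RateLimiter`].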
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses OpenAI's tiktoken tokenizer for all its models, irrespective of the
            // underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

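    /// Streams a completion, using the Responses API for models that support
    /// it and falling back to the Chat Completions API otherwise. Requests go
    /// through the per-model rate limiter unless the request bypasses it.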
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let bypass_rate_limit = request.bypass_rate_limit;
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        if self.model.supports_response() {
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream_with_bypass(
                        async move {
                            let stream = request.await?;
                            let mapper = CopilotResponsesEventMapper::new();
                            Ok(mapper.map_stream(stream).boxed())
                        },
                        bypass_rate_limit,
                    )
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream_with_bypass(
                    async move {
                        let response = request.await?;
                        Ok(map_to_language_model_completion_events(
                            response,
                            is_streaming,
                        ))
                    },
                    bypass_rate_limit,
                )
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

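/// Maps a Chat Completions event stream into [`LanguageModelCompletionEvent`]s,
/// accumulating partial tool calls (and any Gemini reasoning data) across
/// deltas until a finish reason arrives.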
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3).
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| {
                                        // The model can output an empty string to indicate the
                                        // absence of arguments. When that happens, create an
                                        // empty object instead.
                                        let arguments = if tool_call.arguments.is_empty() {
                                            Ok(serde_json::Value::Object(Default::default()))
                                        } else {
                                            serde_json::Value::from_str(&tool_call.arguments)
                                        };
                                        match arguments {
                                            Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                                LanguageModelToolUse {
                                                    id: tool_call.id.into(),
                                                    name: tool_call.name.as_str().into(),
                                                    is_input_complete: true,
                                                    input,
                                                    raw_input: tool_call.arguments,
                                                    thought_signature: tool_call.thought_signature,
                                                },
                                            )),
                                            Err(error) => Ok(
                                                LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                    id: tool_call.id.into(),
                                                    tool_name: tool_call.name.as_str().into(),
                                                    raw_input: tool_call.arguments.into(),
                                                    json_parse_error: error.to_string(),
                                                },
                                            ),
                                        }
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

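/// Maps Copilot Responses API stream events into [`LanguageModelCompletionEvent`]s,
/// tracking whether a tool-use stop was already emitted so that a subsequent
/// `Completed` event does not produce a duplicate stop.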
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

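    /// Adapts a raw stream of [`copilot_responses::StreamEvent`]s into
    /// completion events, flattening each incoming event into zero or more
    /// output events.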
    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

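    /// Translates a single stream event: text deltas become `Text`, completed
    /// function calls become `ToolUse` (or a JSON parse error), reasoning items
    /// become `Thinking`/`RedactedThinking`, and terminal events emit usage
    /// updates plus an appropriate `Stop` reason.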
    fn map_event(
        &mut self,
        event: copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match serde_json::from_str::<serde_json::Value>(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot_responses::IncompleteReason::ContentFilter) => StopReason::Refusal,
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!("{error:?}")),
            )],

            copilot_responses::StreamEvent::Created { .. }
            | copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

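/// Converts a [`LanguageModelRequest`] into a Chat Completions request.
/// Consecutive messages with the same role are merged first; tool results
/// become `Tool` messages, and images are forwarded only when the model
/// supports vision.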
fn into_copilot_chat(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: ToolCallContent::Function {
                                function: FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details.
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => ToolChoice::Auto,
            LanguageModelToolChoice::Any => ToolChoice::Any,
            LanguageModelToolChoice::None => ToolChoice::None,
        }),
    })
}

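/// Converts a [`LanguageModelRequest`] into a Copilot Responses API request.
/// Tool results become `FunctionCallOutput` items, assistant tool uses become
/// `FunctionCall` items, and redacted thinking is forwarded as encrypted
/// `Reasoning` content (and requested back via `ReasoningEncryptedContent`).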
fn into_copilot_responses(
    model: &CopilotChatModel,
    request: LanguageModelRequest,
) -> copilot_responses::Request {
    use copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
        bypass_rate_limit: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // Configuring this would require reading a value from user settings.
        include: Some(vec![
            copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot_chat::responses;
    use futures::StreamExt;

    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }

    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot_chat::{
            FunctionChunk, ResponseChoice, ResponseDelta, ResponseEvent, Role, ToolCallChunk,
        };

        let events = vec![
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(ResponseDelta {
                        content: None,
                        role: Some(Role::Assistant),
                        tool_calls: vec![ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
}