use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anyhow::{Result, anyhow};
use cloud_llm_client::CompletionIntent;
use collections::HashMap;
use copilot::copilot_chat::{
    ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, ImageUrl,
    Model as CopilotChatModel, ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool,
    ToolCall,
};
use copilot::{Copilot, Status};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{Action, AnyView, App, AsyncApp, Entity, Render, Subscription, Task, svg};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolChoice, LanguageModelToolResultContent,
    LanguageModelToolSchemaFormat, LanguageModelToolUse, MessageContent, RateLimiter, Role,
    StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::{CommonAnimationExt, prelude::*};
use util::debug_panic;

use crate::ui::ConfiguredApiCard;

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

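/// Holds the subscriptions that keep this provider in sync with the global
/// `CopilotChat` entity and the settings store.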
pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = copilot::copilot_chat::CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::Copilot
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's "base model", which is likely a relatively
        // fast model (e.g., GPT-4o) and a sensible choice when considering premium requests.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        }

        let Some(copilot) = Copilot::global(cx) else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(
                "Received the following error while signing into Copilot: {err}"
            ),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting. Please wait for it to finish starting, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, cx)).into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

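/// Flattens the request messages into `tiktoken_rs` chat messages so that
/// token usage can be estimated locally in `count_tokens`.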
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

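/// A single Copilot Chat model, paired with a limiter that caps the number of
/// concurrent requests made through it.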
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all its models, irrespective of the
            // underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

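        // Models that support the Responses API take that path; everything
        // else goes through the Chat Completions API below.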
        if self.model.supports_response() {
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

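/// Maps raw Copilot Chat response events onto [`LanguageModelCompletionEvent`]s.
///
/// Text deltas are forwarded as they arrive, while tool-call fragments and
/// reasoning data are accumulated in the unfold state and only emitted once a
/// `finish_reason` signals the end of the turn.
///
/// A minimal usage sketch (mirroring the tests at the bottom of this file;
/// `events` is assumed to be a `Vec<Result<ResponseEvent>>` source built
/// elsewhere):
///
/// ```ignore
/// let stream = map_to_language_model_completion_events(
///     Box::pin(futures::stream::iter(events)),
///     /* is_streaming */ true,
/// );
/// ```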
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3)
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

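                        // Tool calls stream in fragments; merge each fragment into
                        // the entry for its index and parse the accumulated
                        // arguments only once the finish reason arrives.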
                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| {
                                        // The model can output an empty string
                                        // to indicate the absence of arguments.
                                        // When that happens, create an empty
                                        // object instead.
                                        let arguments = if tool_call.arguments.is_empty() {
                                            Ok(serde_json::Value::Object(Default::default()))
                                        } else {
                                            serde_json::Value::from_str(&tool_call.arguments)
                                        };
                                        match arguments {
                                            Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                                LanguageModelToolUse {
                                                    id: tool_call.id.into(),
                                                    name: tool_call.name.as_str().into(),
                                                    is_input_complete: true,
                                                    input,
                                                    raw_input: tool_call.arguments,
                                                    thought_signature: tool_call.thought_signature,
                                                },
                                            )),
                                            Err(error) => Ok(
                                                LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                    id: tool_call.id.into(),
                                                    tool_name: tool_call.name.as_str().into(),
                                                    raw_input: tool_call.arguments.into(),
                                                    json_parse_error: error.to_string(),
                                                },
                                            ),
                                        }
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

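/// Maps Copilot Responses API stream events onto [`LanguageModelCompletionEvent`]s,
/// remembering whether a tool-use `Stop` was already emitted so that the final
/// `Completed` event does not produce a duplicate.
///
/// A minimal usage sketch (mirroring the `map_events` helper in the tests
/// below; `events` is assumed to be a `Vec<Result<StreamEvent>>`):
///
/// ```ignore
/// let mapped = CopilotResponsesEventMapper::new()
///     .map_stream(Box::pin(futures::stream::iter(events)));
/// ```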
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot::copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot::copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot::copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot::copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot::copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot::copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match serde_json::from_str::<serde_json::Value>(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot::copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot::copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot::copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot::copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot::copilot_responses::IncompleteReason::ContentFilter) => {
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot::copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot::copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!("{error:?}")),
            )],

            copilot::copilot_responses::StreamEvent::Created { .. }
            | copilot::copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

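/// Converts a [`LanguageModelRequest`] into a Chat Completions request.
///
/// Consecutive messages that share a role are merged into a single message
/// before conversion, and tool results are emitted as separate
/// `ChatMessage::Tool` entries ahead of the user content that follows them.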
fn into_copilot_chat(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: copilot::copilot_chat::ToolCallContent::Function {
                                function: copilot::copilot_chat::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: copilot::copilot_chat::Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => copilot::copilot_chat::ToolChoice::Auto,
            LanguageModelToolChoice::Any => copilot::copilot_chat::ToolChoice::Any,
            LanguageModelToolChoice::None => copilot::copilot_chat::ToolChoice::None,
        }),
    })
}

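/// Converts a [`LanguageModelRequest`] into a Responses API request, replaying
/// prior tool calls, tool results, and redacted thinking blocks as input items
/// so the conversation state survives the round-trip.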
fn into_copilot_responses(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> copilot::copilot_responses::Request {
    use copilot::copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

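                // Replay redacted thinking blocks as encrypted reasoning items;
                // the request asks for `ReasoningEncryptedContent` below, so the
                // model can resume its prior chain of thought.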
                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // We would need to add support for setting this from user settings.
        include: Some(vec![
            copilot::copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot::copilot_responses as responses;
    use futures::StreamExt;

    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }

    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot::copilot_chat::ResponseEvent;

        let events = vec![
            ResponseEvent {
                choices: vec![copilot::copilot_chat::ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(copilot::copilot_chat::ResponseDelta {
                        content: None,
                        role: Some(copilot::copilot_chat::Role::Assistant),
                        tool_calls: vec![copilot::copilot_chat::ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(copilot::copilot_chat::FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![copilot::copilot_chat::ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(copilot::copilot_chat::ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
}

struct ConfigurationView {
    copilot_status: Option<copilot::Status>,
    state: Entity<State>,
    _subscription: Option<Subscription>,
}

impl ConfigurationView {
    pub fn new(state: Entity<State>, cx: &mut Context<Self>) -> Self {
        let copilot = Copilot::global(cx);

        Self {
            copilot_status: copilot.as_ref().map(|copilot| copilot.read(cx).status()),
            state,
            _subscription: copilot.as_ref().map(|copilot| {
                cx.observe(copilot, |this, model, cx| {
                    this.copilot_status = Some(model.read(cx).status());
                    cx.notify();
                })
            }),
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        if self.state.read(cx).is_authenticated(cx) {
            ConfiguredApiCard::new("Authorized")
                .button_label("Sign Out")
                .on_click(|_, window, cx| {
                    window.dispatch_action(copilot::SignOut.boxed_clone(), cx);
                })
                .into_any_element()
        } else {
            let loading_icon = Icon::new(IconName::ArrowCircle).with_rotate_animation(4);

            const ERROR_LABEL: &str = "Copilot Chat requires an active GitHub Copilot subscription. Please ensure Copilot is configured and try again, or use a different Assistant provider.";

            match &self.copilot_status {
                Some(status) => match status {
                    Status::Starting { task: _ } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Starting Copilot…"))
                        .into_any_element(),
                    Status::SigningIn { prompt: _ }
                    | Status::SignedOut {
                        awaiting_signing_in: true,
                    } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Signing into Copilot…"))
                        .into_any_element(),
                    Status::Error(_) => {
                        const LABEL: &str = "Copilot had issues starting. Please try restarting it. If the issue persists, try reinstalling Copilot.";
                        v_flex()
                            .gap_6()
                            .child(Label::new(LABEL))
                            .child(svg().size_8().path(IconName::CopilotError.path()))
                            .into_any_element()
                    }
                    _ => {
                        const LABEL: &str = "To use Zed's agent with GitHub Copilot, you need to be logged in to GitHub. Note that your GitHub account must have an active Copilot Chat subscription.";

                        v_flex()
                            .gap_2()
                            .child(Label::new(LABEL))
                            .child(
                                Button::new("sign_in", "Sign in to use GitHub Copilot")
                                    .full_width()
                                    .style(ButtonStyle::Outlined)
                                    .icon_color(Color::Muted)
                                    .icon(IconName::Github)
                                    .icon_position(IconPosition::Start)
                                    .icon_size(IconSize::Small)
                                    .on_click(|_, window, cx| {
                                        copilot::initiate_sign_in(window, cx)
                                    }),
                            )
                            .into_any_element()
                    }
                },
                None => v_flex()
                    .gap_6()
                    .child(Label::new(ERROR_LABEL))
                    .into_any_element(),
            }
        }
    }
}