use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anyhow::{Result, anyhow};
use cloud_llm_client::CompletionIntent;
use collections::HashMap;
use copilot::copilot_chat::{
    ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, ImageUrl,
    Model as CopilotChatModel, ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool,
    ToolCall,
};
use copilot::{Copilot, Status};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{Action, AnyView, App, AsyncApp, Entity, Render, Subscription, Task, svg};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolChoice, LanguageModelToolResultContent,
    LanguageModelToolSchemaFormat, LanguageModelToolUse, MessageContent, RateLimiter, Role,
    StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::{CommonAnimationExt, prelude::*};
use util::debug_panic;

use crate::ui::ConfiguredApiCard;

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

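/// Language model provider backed by GitHub Copilot Chat.
///
/// Holds the shared [`State`] entity, which keeps subscriptions to the global
/// [`CopilotChat`] instance and the settings store so that model lists and
/// authentication status stay current.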
pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = copilot::copilot_chat::CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::Copilot
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model is Copilot Chat's "base model", which is typically a relatively
        // fast model (e.g., GPT-4o) and a sensible choice when considering premium requests.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        }

        let Some(copilot) = Copilot::global(cx) else {
            return Task::ready(Err(anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            )
            .into()));
        };

        let err = match copilot.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(
                "Received the following error while signing into Copilot: {err}"
            ),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting; please wait for Copilot to start, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, cx)).into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

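/// Converts a [`LanguageModelRequest`] into the message format expected by
/// `tiktoken_rs`, flattening each message to its string contents so tokens
/// can be counted locally.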
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses OpenAI's tiktoken tokenizer for all of its models, irrespective
            // of the underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        if self.model.supports_response() {
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

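/// Maps a raw Copilot Chat Completions event stream into
/// [`LanguageModelCompletionEvent`]s, accumulating partial tool calls and
/// reasoning data across deltas and emitting them once a finish reason
/// arrives. When `is_streaming` is false, the full message is read from
/// `choice.message` instead of `choice.delta`.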
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
        reasoning_opaque: Option<String>,
        reasoning_text: Option<String>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
            reasoning_opaque: None,
            reasoning_text: None,
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Capture reasoning data from the delta (e.g. for Gemini 3).
                        if let Some(opaque) = delta.reasoning_opaque.clone() {
                            state.reasoning_opaque = Some(opaque);
                        }
                        if let Some(text) = delta.reasoning_text.clone() {
                            state.reasoning_text = Some(text);
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
                                // be preserved and sent back in subsequent requests. Emit it as
                                // ReasoningDetails so the agent stores it in the message.
                                if state.reasoning_opaque.is_some()
                                    || state.reasoning_text.is_some()
                                {
                                    let mut details = serde_json::Map::new();
                                    if let Some(opaque) = state.reasoning_opaque.take() {
                                        details.insert(
                                            "reasoning_opaque".to_string(),
                                            serde_json::Value::String(opaque),
                                        );
                                    }
                                    if let Some(text) = state.reasoning_text.take() {
                                        details.insert(
                                            "reasoning_text".to_string(),
                                            serde_json::Value::String(text),
                                        );
                                    }
                                    events.push(Ok(
                                        LanguageModelCompletionEvent::ReasoningDetails(
                                            serde_json::Value::Object(details),
                                        ),
                                    ));
                                }

                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| {
                                        // The model can output an empty string to indicate the
                                        // absence of arguments. When that happens, create an
                                        // empty object instead.
                                        let arguments = if tool_call.arguments.is_empty() {
                                            Ok(serde_json::Value::Object(Default::default()))
                                        } else {
                                            serde_json::Value::from_str(&tool_call.arguments)
                                        };
                                        match arguments {
                                            Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                                LanguageModelToolUse {
                                                    id: tool_call.id.into(),
                                                    name: tool_call.name.as_str().into(),
                                                    is_input_complete: true,
                                                    input,
                                                    raw_input: tool_call.arguments,
                                                    thought_signature: tool_call.thought_signature,
                                                },
                                            )),
                                            Err(error) => Ok(
                                                LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                    id: tool_call.id.into(),
                                                    tool_name: tool_call.name.as_str().into(),
                                                    raw_input: tool_call.arguments.into(),
                                                    json_parse_error: error.to_string(),
                                                },
                                            ),
                                        }
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

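/// Maps Copilot's Responses API stream events into
/// [`LanguageModelCompletionEvent`]s, tracking whether a tool-use stop has
/// already been emitted so that a later `Completed` event does not produce a
/// duplicate Stop.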
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot::copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot::copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot::copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot::copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot::copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot::copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match serde_json::from_str::<serde_json::Value>(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot::copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot::copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot::copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot::copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot::copilot_responses::IncompleteReason::ContentFilter) => {
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot::copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot::copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!("{error:?}")),
            )],

            copilot::copilot_responses::StreamEvent::Created { .. }
            | copilot::copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

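/// Converts a [`LanguageModelRequest`] into a Copilot Chat Completions
/// request: consecutive same-role messages are merged, tool results become
/// `ChatMessage::Tool` entries, and stored reasoning details are threaded
/// back onto the assistant message.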
fn into_copilot_chat(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: copilot::copilot_chat::ToolCallContent::Function {
                                function: copilot::copilot_chat::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                // Extract reasoning_opaque and reasoning_text from reasoning_details.
                let (reasoning_opaque, reasoning_text) =
                    if let Some(details) = &message.reasoning_details {
                        let opaque = details
                            .get("reasoning_opaque")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        let text = details
                            .get("reasoning_text")
                            .and_then(|v| v.as_str())
                            .map(|s| s.to_string());
                        (opaque, text)
                    } else {
                        (None, None)
                    };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                    reasoning_opaque,
                    reasoning_text,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: copilot::copilot_chat::Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => copilot::copilot_chat::ToolChoice::Auto,
            LanguageModelToolChoice::Any => copilot::copilot_chat::ToolChoice::Any,
            LanguageModelToolChoice::None => copilot::copilot_chat::ToolChoice::None,
        }),
    })
}

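/// Converts a [`LanguageModelRequest`] into a Copilot Responses API request,
/// mapping tool results, images, redacted thinking, and tool calls into the
/// corresponding `ResponseInputItem`s and requesting encrypted reasoning
/// content so it can be replayed in later turns.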
fn into_copilot_responses(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> copilot::copilot_responses::Request {
    use copilot::copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // We would need to add support for setting this from user settings.
        include: Some(vec![
            copilot::copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot::copilot_responses as responses;
    use futures::StreamExt;

    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }

    #[test]
    fn chat_completions_stream_maps_reasoning_data() {
        use copilot::copilot_chat::ResponseEvent;

        let events = vec![
            ResponseEvent {
                choices: vec![copilot::copilot_chat::ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(copilot::copilot_chat::ResponseDelta {
                        content: None,
                        role: Some(copilot::copilot_chat::Role::Assistant),
                        tool_calls: vec![copilot::copilot_chat::ToolCallChunk {
                            index: Some(0),
                            id: Some("call_abc123".to_string()),
                            function: Some(copilot::copilot_chat::FunctionChunk {
                                name: Some("list_directory".to_string()),
                                arguments: Some("{\"path\":\"test\"}".to_string()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
                        reasoning_text: Some("Let me check the directory".to_string()),
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
            ResponseEvent {
                choices: vec![copilot::copilot_chat::ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(copilot::copilot_chat::ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-123".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        let mut has_reasoning_details = false;
        let mut has_tool_use = false;
        let mut reasoning_opaque_value: Option<String> = None;
        let mut reasoning_text_value: Option<String> = None;

        for event_result in mapped {
            match event_result {
                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
                    has_reasoning_details = true;
                    reasoning_opaque_value = details
                        .get("reasoning_opaque")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    reasoning_text_value = details
                        .get("reasoning_text")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                }
                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
                    has_tool_use = true;
                    assert_eq!(tool_use.id.to_string(), "call_abc123");
                    assert_eq!(tool_use.name.as_ref(), "list_directory");
                }
                _ => {}
            }
        }

        assert!(
            has_reasoning_details,
            "Should emit ReasoningDetails event for Gemini 3 reasoning"
        );
        assert!(has_tool_use, "Should emit ToolUse event");
        assert_eq!(
            reasoning_opaque_value,
            Some("encrypted_reasoning_token_xyz".to_string()),
            "Should capture reasoning_opaque"
        );
        assert_eq!(
            reasoning_text_value,
            Some("Let me check the directory".to_string()),
            "Should capture reasoning_text"
        );
    }
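
    // Sketch of a regression test for the empty-arguments fallback documented in
    // `map_to_language_model_completion_events`: a tool call whose `arguments`
    // string is empty should surface as an empty JSON object. It assumes the
    // same `copilot_chat` response structures used in the test above; the tool
    // name and call id are illustrative.
    #[test]
    fn chat_completions_stream_maps_empty_tool_arguments_to_empty_object() {
        use copilot::copilot_chat::ResponseEvent;

        let events = vec![
            // Delta carrying a tool call whose arguments are an empty string.
            ResponseEvent {
                choices: vec![copilot::copilot_chat::ResponseChoice {
                    index: Some(0),
                    finish_reason: None,
                    delta: Some(copilot::copilot_chat::ResponseDelta {
                        content: None,
                        role: Some(copilot::copilot_chat::Role::Assistant),
                        tool_calls: vec![copilot::copilot_chat::ToolCallChunk {
                            index: Some(0),
                            id: Some("call_no_args".to_string()),
                            function: Some(copilot::copilot_chat::FunctionChunk {
                                name: Some("no_args_tool".to_string()),
                                arguments: Some(String::new()),
                                thought_signature: None,
                            }),
                        }],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-empty".to_string(),
                usage: None,
            },
            // Final chunk signaling the tool-call finish reason.
            ResponseEvent {
                choices: vec![copilot::copilot_chat::ResponseChoice {
                    index: Some(0),
                    finish_reason: Some("tool_calls".to_string()),
                    delta: Some(copilot::copilot_chat::ResponseDelta {
                        content: None,
                        role: None,
                        tool_calls: vec![],
                        reasoning_opaque: None,
                        reasoning_text: None,
                    }),
                    message: None,
                }],
                id: "chatcmpl-empty".to_string(),
                usage: None,
            },
        ];

        let mapped = futures::executor::block_on(async {
            map_to_language_model_completion_events(
                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
                true,
            )
            .collect::<Vec<_>>()
            .await
        });

        let tool_use = mapped
            .into_iter()
            .filter_map(Result::ok)
            .find_map(|event| match event {
                LanguageModelCompletionEvent::ToolUse(tool_use) => Some(tool_use),
                _ => None,
            })
            .expect("should emit a ToolUse event");
        assert_eq!(tool_use.name.as_ref(), "no_args_tool");
        assert_eq!(
            tool_use.input,
            serde_json::Value::Object(Default::default()),
            "empty arguments should become an empty JSON object"
        );
    }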
}

struct ConfigurationView {
    copilot_status: Option<copilot::Status>,
    state: Entity<State>,
    _subscription: Option<Subscription>,
}

impl ConfigurationView {
    pub fn new(state: Entity<State>, cx: &mut Context<Self>) -> Self {
        let copilot = Copilot::global(cx);

        Self {
            copilot_status: copilot.as_ref().map(|copilot| copilot.read(cx).status()),
            state,
            _subscription: copilot.as_ref().map(|copilot| {
                cx.observe(copilot, |this, model, cx| {
                    this.copilot_status = Some(model.read(cx).status());
                    cx.notify();
                })
            }),
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        if self.state.read(cx).is_authenticated(cx) {
            ConfiguredApiCard::new("Authorized")
                .button_label("Sign Out")
                .on_click(|_, window, cx| {
                    window.dispatch_action(copilot::SignOut.boxed_clone(), cx);
                })
                .into_any_element()
        } else {
            let loading_icon = Icon::new(IconName::ArrowCircle).with_rotate_animation(4);

            const ERROR_LABEL: &str = "Copilot Chat requires an active GitHub Copilot subscription. Please ensure Copilot is configured and try again, or use a different Assistant provider.";

            match &self.copilot_status {
                Some(status) => match status {
                    Status::Starting { task: _ } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Starting Copilot…"))
                        .into_any_element(),
                    Status::SigningIn { prompt: _ }
                    | Status::SignedOut {
                        awaiting_signing_in: true,
                    } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Signing into Copilot…"))
                        .into_any_element(),
                    Status::Error(_) => {
                        const LABEL: &str = "Copilot had issues starting. Please try restarting it. If the issue persists, try reinstalling Copilot.";
                        v_flex()
                            .gap_6()
                            .child(Label::new(LABEL))
                            .child(svg().size_8().path(IconName::CopilotError.path()))
                            .into_any_element()
                    }
                    _ => {
                        const LABEL: &str = "To use Zed's agent with GitHub Copilot, you need to be logged in to GitHub. Note that your GitHub account must have an active Copilot Chat subscription.";

                        v_flex()
                            .gap_2()
                            .child(Label::new(LABEL))
                            .child(
                                Button::new("sign_in", "Sign in to use GitHub Copilot")
                                    .full_width()
                                    .style(ButtonStyle::Outlined)
                                    .icon_color(Color::Muted)
                                    .icon(IconName::Github)
                                    .icon_position(IconPosition::Start)
                                    .icon_size(IconSize::Small)
                                    .on_click(|_, window, cx| {
                                        copilot::initiate_sign_in(window, cx)
                                    }),
                            )
                            .into_any_element()
                    }
                },
                None => v_flex()
                    .gap_6()
                    .child(Label::new(ERROR_LABEL))
                    .into_any_element(),
            }
        }
    }
}