use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anyhow::{Result, anyhow};
use cloud_llm_client::CompletionIntent;
use collections::HashMap;
use copilot::copilot_chat::{
    ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, ImageUrl,
    Model as CopilotChatModel, ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool,
    ToolCall,
};
use copilot::{Copilot, Status};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{Action, AnyView, App, AsyncApp, Entity, Render, Subscription, Task, svg};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolChoice, LanguageModelToolResultContent,
    LanguageModelToolSchemaFormat, LanguageModelToolUse, MessageContent, RateLimiter, Role,
    StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::{CommonAnimationExt, prelude::*};
use util::debug_panic;

use crate::ui::ConfiguredApiCard;

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

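/// Registers GitHub Copilot Chat as a language model provider, exposing the
/// models reported by the global `CopilotChat` entity.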
pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

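/// Holds the subscriptions that keep the provider in sync with the global
/// `CopilotChat` entity and with settings changes.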
pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = copilot::copilot_chat::CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::Copilot
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's "base model", which is likely a
        // relatively fast model (e.g., GPT-4o) and a sensible choice when considering
        // premium requests.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        };

        let Some(copilot) = Copilot::global(cx) else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(format!(
                "Received the following error while signing into Copilot: {err}"
            )),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting. Please wait for it to finish starting, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, cx)).into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

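/// Flattens the request's messages into `tiktoken_rs` chat messages so the
/// prompt size can be estimated with the OpenAI tokenizers.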
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

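/// A single Copilot Chat model, wrapped with a rate limiter that allows up to
/// four concurrent requests.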
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all of its models,
            // irrespective of the underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

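        // Prefer the Responses API when the model supports it; otherwise fall
        // back to the Chat Completions endpoint below.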
        if self.model.supports_response() {
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

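/// Maps a Copilot Chat Completions event stream to `LanguageModelCompletionEvent`s,
/// accumulating streamed tool-call fragments by index until a finish reason
/// arrives.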
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| {
                                        // The model can output an empty string
                                        // to indicate the absence of arguments.
                                        // When that happens, create an empty
                                        // object instead.
                                        let arguments = if tool_call.arguments.is_empty() {
                                            Ok(serde_json::Value::Object(Default::default()))
                                        } else {
                                            serde_json::Value::from_str(&tool_call.arguments)
                                        };
                                        match arguments {
                                            Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                                LanguageModelToolUse {
                                                    id: tool_call.id.into(),
                                                    name: tool_call.name.as_str().into(),
                                                    is_input_complete: true,
                                                    input,
                                                    raw_input: tool_call.arguments,
                                                },
                                            )),
                                            Err(error) => Ok(
                                                LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                    id: tool_call.id.into(),
                                                    tool_name: tool_call.name.as_str().into(),
                                                    raw_input: tool_call.arguments.into(),
                                                    json_parse_error: error.to_string(),
                                                },
                                            ),
                                        }
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

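/// Maps Copilot Responses API stream events to `LanguageModelCompletionEvent`s,
/// remembering whether a `ToolUse` stop was already emitted so `Completed`
/// doesn't produce a duplicate `Stop` event.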
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot::copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot::copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot::copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot::copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot::copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot::copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    ..
                } => {
                    let mut events = Vec::new();
                    match serde_json::from_str::<serde_json::Value>(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot::copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot::copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot::copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot::copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot::copilot_responses::IncompleteReason::ContentFilter) => {
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot::copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot::copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(format!("{error:?}"))),
            )],

            copilot::copilot_responses::StreamEvent::Created { .. }
            | copilot::copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

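/// Converts a `LanguageModelRequest` into a Copilot Chat Completions request,
/// merging consecutive same-role messages and emitting tool results and tool
/// calls as the dedicated message kinds the API expects.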
fn into_copilot_chat(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: copilot::copilot_chat::ToolCallContent::Function {
                                function: copilot::copilot_chat::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: copilot::copilot_chat::Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => copilot::copilot_chat::ToolChoice::Auto,
            LanguageModelToolChoice::Any => copilot::copilot_chat::ToolChoice::Any,
            LanguageModelToolChoice::None => copilot::copilot_chat::ToolChoice::None,
        }),
    })
}

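/// Converts a `LanguageModelRequest` into a Copilot Responses API request,
/// mapping tool results, tool calls, redacted thinking, and message content to
/// the corresponding response input items.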
fn into_copilot_responses(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> copilot::copilot_responses::Request {
    use copilot::copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // Setting this would require adding a corresponding user setting.
        include: Some(vec![
            copilot::copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot::copilot_responses as responses;
    use futures::StreamExt;

    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }
}
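
/// Renders the configuration panel for this provider: a sign-in flow and
/// status messaging while Copilot starts or signs in, and a sign-out button
/// once authorized.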
struct ConfigurationView {
    copilot_status: Option<copilot::Status>,
    state: Entity<State>,
    _subscription: Option<Subscription>,
}

impl ConfigurationView {
    pub fn new(state: Entity<State>, cx: &mut Context<Self>) -> Self {
        let copilot = Copilot::global(cx);

        Self {
            copilot_status: copilot.as_ref().map(|copilot| copilot.read(cx).status()),
            state,
            _subscription: copilot.as_ref().map(|copilot| {
                cx.observe(copilot, |this, model, cx| {
                    this.copilot_status = Some(model.read(cx).status());
                    cx.notify();
                })
            }),
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        if self.state.read(cx).is_authenticated(cx) {
            ConfiguredApiCard::new("Authorized")
                .button_label("Sign Out")
                .on_click(|_, window, cx| {
                    window.dispatch_action(copilot::SignOut.boxed_clone(), cx);
                })
                .into_any_element()
        } else {
            let loading_icon = Icon::new(IconName::ArrowCircle).with_rotate_animation(4);

            const ERROR_LABEL: &str = "Copilot Chat requires an active GitHub Copilot subscription. Please ensure Copilot is configured and try again, or use a different Assistant provider.";

            match &self.copilot_status {
                Some(status) => match status {
                    Status::Starting { task: _ } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Starting Copilot…"))
                        .into_any_element(),
                    Status::SigningIn { prompt: _ }
                    | Status::SignedOut {
                        awaiting_signing_in: true,
                    } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Signing into Copilot…"))
                        .into_any_element(),
                    Status::Error(_) => {
                        const LABEL: &str = "Copilot had issues starting. Please try restarting it. If the issue persists, try reinstalling Copilot.";
                        v_flex()
                            .gap_6()
                            .child(Label::new(LABEL))
                            .child(svg().size_8().path(IconName::CopilotError.path()))
                            .into_any_element()
                    }
                    _ => {
                        const LABEL: &str = "To use Zed's agent with GitHub Copilot, you need to be logged in to GitHub. Note that your GitHub account must have an active Copilot Chat subscription.";

                        v_flex()
                            .gap_2()
                            .child(Label::new(LABEL))
                            .child(
                                Button::new("sign_in", "Sign in to use GitHub Copilot")
                                    .full_width()
                                    .style(ButtonStyle::Outlined)
                                    .icon_color(Color::Muted)
                                    .icon(IconName::Github)
                                    .icon_position(IconPosition::Start)
                                    .icon_size(IconSize::Small)
                                    .on_click(|_, window, cx| {
                                        copilot::initiate_sign_in(window, cx)
                                    }),
                            )
                            .into_any_element()
                    }
                },
                None => v_flex()
                    .gap_6()
                    .child(Label::new(ERROR_LABEL))
                    .into_any_element(),
            }
        }
    }
}