1use std::pin::Pin;
2use std::str::FromStr as _;
3use std::sync::Arc;
4
5use anyhow::{Result, anyhow};
6use cloud_llm_client::CompletionIntent;
7use collections::HashMap;
8use copilot::copilot_chat::{
9 ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, ImageUrl,
10 Model as CopilotChatModel, ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool,
11 ToolCall,
12};
13use copilot::{Copilot, Status};
14use futures::future::BoxFuture;
15use futures::stream::BoxStream;
16use futures::{FutureExt, Stream, StreamExt};
17use gpui::{Action, AnyView, App, AsyncApp, Entity, Render, Subscription, Task, svg};
18use http_client::StatusCode;
19use language::language_settings::all_language_settings;
20use language_model::{
21 AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
22 LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
23 LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
24 LanguageModelRequestMessage, LanguageModelToolChoice, LanguageModelToolResultContent,
25 LanguageModelToolSchemaFormat, LanguageModelToolUse, MessageContent, RateLimiter, Role,
26 StopReason, TokenUsage,
27};
28use settings::SettingsStore;
29use ui::{CommonAnimationExt, prelude::*};
30use util::debug_panic;
31
32use crate::ui::ConfiguredApiCard;
33
34const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
35const PROVIDER_NAME: LanguageModelProviderName =
36 LanguageModelProviderName::new("GitHub Copilot Chat");
37
/// Registers GitHub Copilot Chat as a language-model provider, holding the
/// observable [`State`] entity that the UI watches for changes.
pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}
41
/// Provider state. It carries no data of its own; it exists to own the
/// subscriptions below so that observers of this entity are re-notified when
/// the global `CopilotChat` entity or the settings store changes.
pub struct State {
    // Kept alive for its side effect: notifies observers whenever the global
    // CopilotChat entity changes. `None` when no global entity was registered
    // at construction time.
    _copilot_chat_subscription: Option<Subscription>,
    // Kept alive to push settings changes (the Copilot enterprise URI) into
    // the CopilotChat configuration and re-notify observers.
    _settings_subscription: Subscription,
}
46
47impl State {
48 fn is_authenticated(&self, cx: &App) -> bool {
49 CopilotChat::global(cx)
50 .map(|m| m.read(cx).is_authenticated())
51 .unwrap_or(false)
52 }
53}
54
55impl CopilotChatLanguageModelProvider {
56 pub fn new(cx: &mut App) -> Self {
57 let state = cx.new(|cx| {
58 let copilot_chat_subscription = CopilotChat::global(cx)
59 .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
60 State {
61 _copilot_chat_subscription: copilot_chat_subscription,
62 _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
63 if let Some(copilot_chat) = CopilotChat::global(cx) {
64 let language_settings = all_language_settings(None, cx);
65 let configuration = copilot::copilot_chat::CopilotChatConfiguration {
66 enterprise_uri: language_settings
67 .edit_predictions
68 .copilot
69 .enterprise_uri
70 .clone(),
71 };
72 copilot_chat.update(cx, |chat, cx| {
73 chat.set_configuration(configuration, cx);
74 });
75 }
76 cx.notify();
77 }),
78 }
79 });
80
81 Self { state }
82 }
83
84 fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
85 Arc::new(CopilotChatLanguageModel {
86 model,
87 request_limiter: RateLimiter::new(4),
88 })
89 }
90}
91
92impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
93 type ObservableEntity = State;
94
95 fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
96 Some(self.state.clone())
97 }
98}
99
100impl LanguageModelProvider for CopilotChatLanguageModelProvider {
101 fn id(&self) -> LanguageModelProviderId {
102 PROVIDER_ID
103 }
104
105 fn name(&self) -> LanguageModelProviderName {
106 PROVIDER_NAME
107 }
108
109 fn icon(&self) -> IconName {
110 IconName::Copilot
111 }
112
113 fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
114 let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
115 models
116 .first()
117 .map(|model| self.create_language_model(model.clone()))
118 }
119
120 fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
121 // The default model should be Copilot Chat's 'base model', which is likely a relatively fast
122 // model (e.g. 4o) and a sensible choice when considering premium requests
123 self.default_model(cx)
124 }
125
126 fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
127 let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
128 return Vec::new();
129 };
130 models
131 .iter()
132 .map(|model| self.create_language_model(model.clone()))
133 .collect()
134 }
135
136 fn is_authenticated(&self, cx: &App) -> bool {
137 self.state.read(cx).is_authenticated(cx)
138 }
139
140 fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
141 if self.is_authenticated(cx) {
142 return Task::ready(Ok(()));
143 };
144
145 let Some(copilot) = Copilot::global(cx) else {
146 return Task::ready( Err(anyhow!(
147 "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
148 ).into()));
149 };
150
151 let err = match copilot.read(cx).status() {
152 Status::Authorized => return Task::ready(Ok(())),
153 Status::Disabled => anyhow!(
154 "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
155 ),
156 Status::Error(err) => anyhow!(format!(
157 "Received the following error while signing into Copilot: {err}"
158 )),
159 Status::Starting { task: _ } => anyhow!(
160 "Copilot is still starting, please wait for Copilot to start then try again"
161 ),
162 Status::Unauthorized => anyhow!(
163 "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
164 ),
165 Status::SignedOut { .. } => {
166 anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
167 }
168 Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
169 };
170
171 Task::ready(Err(err.into()))
172 }
173
174 fn configuration_view(
175 &self,
176 _target_agent: language_model::ConfigurationViewTargetAgent,
177 _: &mut Window,
178 cx: &mut App,
179 ) -> AnyView {
180 let state = self.state.clone();
181 cx.new(|cx| ConfigurationView::new(state, cx)).into()
182 }
183
184 fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
185 Task::ready(Err(anyhow!(
186 "Signing out of GitHub Copilot Chat is currently not supported."
187 )))
188 }
189}
190
191fn collect_tiktoken_messages(
192 request: LanguageModelRequest,
193) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
194 request
195 .messages
196 .into_iter()
197 .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
198 role: match message.role {
199 Role::User => "user".into(),
200 Role::Assistant => "assistant".into(),
201 Role::System => "system".into(),
202 },
203 content: Some(message.string_contents()),
204 name: None,
205 function_call: None,
206 })
207 .collect::<Vec<_>>()
208}
209
/// A single Copilot Chat model exposed through the [`LanguageModel`] trait.
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    // Bounds the number of concurrent in-flight requests for this model
    // instance (created with a limit of 4 in `create_language_model`).
    request_limiter: RateLimiter,
}
214
impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    /// OpenAI and Anthropic models accept full JSON Schema tool definitions;
    /// the remaining vendors only get the restricted subset.
    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    /// Every tool-choice mode is supported exactly when the model supports
    /// tools at all.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    /// Estimates the token count of `request` on a background thread using
    /// tiktoken-rs.
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all its models,
            // irrespective of the underlying provider (vendor). Map the
            // tokenizer name to a model name tiktoken-rs recognizes, defaulting
            // to the o200k_base family ("gpt-4o").
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    /// Streams a completion, preferring the "responses" API when the model
    /// supports it and falling back to the chat-completions API otherwise.
    /// Requests are funneled through the per-model rate limiter.
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        // Whether the user explicitly triggered this request; an absent intent
        // is treated as user-initiated. NOTE(review): presumably used by the
        // Copilot backend for premium-request accounting — confirm.
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        // Preferred path: the "responses" API.
        if self.model.supports_response() {
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        // Fallback path: the chat-completions API. Conversion can fail (tool
        // arguments are serialized here), so surface that as a ready error.
        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}
352
/// Adapts a Copilot chat-completions event stream into
/// [`LanguageModelCompletionEvent`]s, accumulating partial tool-call deltas
/// until a `tool_calls` finish reason arrives.
///
/// `is_streaming` selects where each event's payload lives: `delta` for
/// streamed responses, `message` for non-streamed ones.
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    // A tool call assembled incrementally from streamed fragments.
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
    }

    // State threaded through `unfold`: the source stream plus partially
    // assembled tool calls keyed by their index in the response.
    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        // Streaming responses carry their payload in `delta`;
                        // non-streaming responses carry it in `message`.
                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        // Merge this delta's tool-call fragments into the
                        // accumulated calls; id/name/arguments may arrive
                        // piecemeal across multiple events.
                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                // Flush every accumulated tool call, parsing
                                // the concatenated argument JSON.
                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| {
                                        // The model can output an empty string
                                        // to indicate the absence of arguments.
                                        // When that happens, create an empty
                                        // object instead.
                                        let arguments = if tool_call.arguments.is_empty() {
                                            Ok(serde_json::Value::Object(Default::default()))
                                        } else {
                                            serde_json::Value::from_str(&tool_call.arguments)
                                        };
                                        match arguments {
                                            Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                                LanguageModelToolUse {
                                                    id: tool_call.id.into(),
                                                    name: tool_call.name.as_str().into(),
                                                    is_input_complete: true,
                                                    input,
                                                    raw_input: tool_call.arguments,
                                                },
                                            )),
                                            Err(error) => Ok(
                                                LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                    id: tool_call.id.into(),
                                                    tool_name: tool_call.name.as_str().into(),
                                                    raw_input: tool_call.arguments.into(),
                                                    json_parse_error: error.to_string(),
                                                },
                                            ),
                                        }
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                // Unknown finish reason: log it and end the
                                // turn rather than hanging the stream.
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    // Each source event can yield several completion events; flatten them.
    .flat_map(futures::stream::iter)
}
497
/// Maps Copilot "responses" API stream events into
/// [`LanguageModelCompletionEvent`]s.
pub struct CopilotResponsesEventMapper {
    // Set when a Stop event has already been emitted mid-stream (currently on
    // tool use) so the terminal `Completed` event doesn't emit a duplicate
    // Stop.
    pending_stop_reason: Option<StopReason>,
}
501
impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    /// Converts a raw responses event stream into completion events, expanding
    /// each source event into zero or more output events.
    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot::copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    /// Maps a single stream event. Stateful: a tool-call completion emits an
    /// early `Stop(ToolUse)` that must suppress the `Stop` normally emitted
    /// when the terminal `Completed` event arrives.
    fn map_event(
        &mut self,
        event: copilot::copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            // A new output item: only assistant messages produce an event
            // (the start of a message); other item kinds are handled when done.
            copilot::copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot::copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot::copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                // Message text was already emitted via OutputTextDelta.
                copilot::copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot::copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    ..
                } => {
                    let mut events = Vec::new();
                    // Arguments arrive as a complete JSON string; a parse
                    // failure is surfaced as a dedicated event rather than an
                    // error so the caller can react to it.
                    match serde_json::from_str::<serde_json::Value>(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot::copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    // Concatenate all summary blocks into one Thinking event.
                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    // Opaque reasoning payload, passed through for replay on
                    // later requests.
                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot::copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                // Skip the EndTurn stop if a ToolUse stop was already emitted.
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot::copilot_responses::StreamEvent::Incomplete { response } => {
                // Translate the incompleteness reason, falling back to any
                // pending stop reason (or EndTurn) when it's absent/unknown.
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot::copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot::copilot_responses::IncompleteReason::ContentFilter) => {
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot::copilot_responses::StreamEvent::Failed { response } => {
                // Map the provider failure onto an HTTP-style error; the error
                // code string is parsed as a status code when possible.
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot::copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(format!("{error:?}"))),
            )],

            copilot::copilot_responses::StreamEvent::Created { .. }
            | copilot::copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}
682
/// Converts a [`LanguageModelRequest`] into a Copilot chat-completions
/// request: merges consecutive same-role messages, then translates each
/// message (including tool results and tool calls) into Copilot's wire format.
///
/// Returns an error only when serializing tool-use arguments to JSON fails.
fn into_copilot_chat(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    // Collapse runs of consecutive messages sharing a role into one message,
    // concatenating their content.
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                // Tool results become dedicated Tool messages, emitted before
                // the user message content that follows them.
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    // Image tool results should have been
                                    // filtered out earlier for non-vision
                                    // models; degrade to a placeholder.
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                // Collect text/thinking/image parts, coalescing adjacent text
                // into a single part.
                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                // Tool uses are serialized into the assistant message's
                // tool_calls list; serialization failure aborts the conversion.
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: copilot::copilot_chat::ToolCallContent::Function {
                                function: copilot::copilot_chat::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                },
                            },
                        });
                    }
                }

                // Concatenate text and thinking content into a single string;
                // other content kinds are dropped here.
                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: copilot::copilot_chat::Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => copilot::copilot_chat::ToolChoice::Auto,
            LanguageModelToolChoice::Any => copilot::copilot_chat::ToolChoice::Any,
            LanguageModelToolChoice::None => copilot::copilot_chat::ToolChoice::None,
        }),
    })
}
840
/// Converts a [`LanguageModelRequest`] into a Copilot "responses" API request,
/// translating messages into response input items (function call outputs,
/// function calls, reasoning items, and role-tagged content messages).
fn into_copilot_responses(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> copilot::copilot_responses::Request {
    use copilot::copilot_responses as responses;

    // Destructure exhaustively so new request fields must be handled (or
    // explicitly ignored) here.
    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                // Tool results become FunctionCallOutput items, emitted before
                // the user's own content.
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        // Prefer the structured `output` value when present;
                        // otherwise fall back to the tool result content.
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        // Image tool results should have been
                                        // filtered out earlier for non-vision
                                        // models; degrade to a placeholder.
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                        status: None,
                        });
                    }
                }

                // Text and (for vision models) image parts become a "user"
                // message item.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                // Replay prior tool calls as FunctionCall items.
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                        });
                    }
                }

                // Replay encrypted reasoning payloads so the model can resume
                // its prior chain of thought.
                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                // Assistant text becomes an output-text message; images are
                // replaced by a placeholder (the API has no assistant images).
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                // Only text content is forwarded for system messages.
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // We would need to add support for setting from user settings.
        include: Some(vec![
            copilot::copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}
1039
1040#[cfg(test)]
1041mod tests {
1042 use super::*;
1043 use copilot::copilot_responses as responses;
1044 use futures::StreamExt;
1045
    /// Drives the mapper over a synthetic event stream and unwraps every
    /// resulting completion event (panicking on any error).
    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }
1057
    // A simple streamed text response should surface, in order: message start,
    // the text delta, a usage update, and an end-of-turn stop.
    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }
1109
    // A completed function call should map to a ToolUse event followed by a
    // tool-use stop.
    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }
1134
1135 #[test]
1136 fn responses_stream_handles_json_parse_error() {
1137 let events = vec![responses::StreamEvent::OutputItemDone {
1138 output_index: 0,
1139 sequence_number: None,
1140 item: responses::ResponseOutputItem::FunctionCall {
1141 id: Some("fn_1".into()),
1142 call_id: "call_1".into(),
1143 name: "do_it".into(),
1144 arguments: "{not json}".into(),
1145 status: None,
1146 },
1147 }];
1148
1149 let mapped = map_events(events);
1150 assert!(matches!(
1151 mapped[0],
1152 LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
1153 if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
1154 ));
1155 assert!(matches!(
1156 mapped[1],
1157 LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1158 ));
1159 }
1160
1161 #[test]
1162 fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
1163 let events = vec![responses::StreamEvent::OutputItemDone {
1164 output_index: 0,
1165 sequence_number: None,
1166 item: responses::ResponseOutputItem::Reasoning {
1167 id: "r1".into(),
1168 summary: Some(vec![responses::ResponseReasoningItem {
1169 kind: "summary_text".into(),
1170 text: "Chain".into(),
1171 }]),
1172 encrypted_content: Some("ENC".into()),
1173 },
1174 }];
1175
1176 let mapped = map_events(events);
1177 assert!(matches!(
1178 mapped[0],
1179 LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
1180 ));
1181 assert!(matches!(
1182 mapped[1],
1183 LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
1184 ));
1185 }
1186
1187 #[test]
1188 fn responses_stream_handles_incomplete_max_tokens() {
1189 let events = vec![responses::StreamEvent::Incomplete {
1190 response: responses::Response {
1191 usage: Some(responses::ResponseUsage {
1192 input_tokens: Some(10),
1193 output_tokens: Some(0),
1194 total_tokens: Some(10),
1195 }),
1196 incomplete_details: Some(responses::IncompleteDetails {
1197 reason: Some(responses::IncompleteReason::MaxOutputTokens),
1198 }),
1199 ..Default::default()
1200 },
1201 }];
1202
1203 let mapped = map_events(events);
1204 assert!(matches!(
1205 mapped[0],
1206 LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1207 input_tokens: 10,
1208 output_tokens: 0,
1209 ..
1210 })
1211 ));
1212 assert!(matches!(
1213 mapped[1],
1214 LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
1215 ));
1216 }
1217
1218 #[test]
1219 fn responses_stream_handles_incomplete_content_filter() {
1220 let events = vec![responses::StreamEvent::Incomplete {
1221 response: responses::Response {
1222 usage: None,
1223 incomplete_details: Some(responses::IncompleteDetails {
1224 reason: Some(responses::IncompleteReason::ContentFilter),
1225 }),
1226 ..Default::default()
1227 },
1228 }];
1229
1230 let mapped = map_events(events);
1231 assert!(matches!(
1232 mapped.last().unwrap(),
1233 LanguageModelCompletionEvent::Stop(StopReason::Refusal)
1234 ));
1235 }
1236
1237 #[test]
1238 fn responses_stream_completed_no_duplicate_after_tool_use() {
1239 let events = vec![
1240 responses::StreamEvent::OutputItemDone {
1241 output_index: 0,
1242 sequence_number: None,
1243 item: responses::ResponseOutputItem::FunctionCall {
1244 id: Some("fn_1".into()),
1245 call_id: "call_1".into(),
1246 name: "do_it".into(),
1247 arguments: "{}".into(),
1248 status: None,
1249 },
1250 },
1251 responses::StreamEvent::Completed {
1252 response: responses::Response::default(),
1253 },
1254 ];
1255
1256 let mapped = map_events(events);
1257
1258 let mut stop_count = 0usize;
1259 let mut saw_tool_use_stop = false;
1260 for event in mapped {
1261 if let LanguageModelCompletionEvent::Stop(reason) = event {
1262 stop_count += 1;
1263 if matches!(reason, StopReason::ToolUse) {
1264 saw_tool_use_stop = true;
1265 }
1266 }
1267 }
1268 assert_eq!(stop_count, 1, "should emit exactly one Stop event");
1269 assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
1270 }
1271
1272 #[test]
1273 fn responses_stream_failed_maps_http_response_error() {
1274 let events = vec![responses::StreamEvent::Failed {
1275 response: responses::Response {
1276 error: Some(responses::ResponseError {
1277 code: "429".into(),
1278 message: "too many requests".into(),
1279 }),
1280 ..Default::default()
1281 },
1282 }];
1283
1284 let mapped_results = futures::executor::block_on(async {
1285 CopilotResponsesEventMapper::new()
1286 .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1287 .collect::<Vec<_>>()
1288 .await
1289 });
1290
1291 assert_eq!(mapped_results.len(), 1);
1292 match &mapped_results[0] {
1293 Err(LanguageModelCompletionError::HttpResponseError {
1294 status_code,
1295 message,
1296 ..
1297 }) => {
1298 assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
1299 assert_eq!(message, "too many requests");
1300 }
1301 other => panic!("expected HttpResponseError, got {:?}", other),
1302 }
1303 }
1304}
/// Configuration UI for the Copilot Chat provider: shows the current
/// authentication state and offers sign-in/sign-out controls.
struct ConfigurationView {
    // Latest observed Copilot status; `None` when no global Copilot
    // instance is registered.
    copilot_status: Option<copilot::Status>,
    // Provider state queried to decide whether Copilot Chat is authenticated.
    state: Entity<State>,
    // Holds the Copilot status observation alive for this view's lifetime.
    _subscription: Option<Subscription>,
}
1310
1311impl ConfigurationView {
1312 pub fn new(state: Entity<State>, cx: &mut Context<Self>) -> Self {
1313 let copilot = Copilot::global(cx);
1314
1315 Self {
1316 copilot_status: copilot.as_ref().map(|copilot| copilot.read(cx).status()),
1317 state,
1318 _subscription: copilot.as_ref().map(|copilot| {
1319 cx.observe(copilot, |this, model, cx| {
1320 this.copilot_status = Some(model.read(cx).status());
1321 cx.notify();
1322 })
1323 }),
1324 }
1325 }
1326}
1327
impl Render for ConfigurationView {
    // Renders the provider configuration panel: a sign-out card when
    // authenticated, otherwise a state-dependent message or sign-in button
    // derived from the last observed Copilot status.
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        if self.state.read(cx).is_authenticated(cx) {
            // Already signed in: show the authorized card with a sign-out action.
            ConfiguredApiCard::new("Authorized")
                .button_label("Sign Out")
                .on_click(|_, window, cx| {
                    window.dispatch_action(copilot::SignOut.boxed_clone(), cx);
                })
                .into_any_element()
        } else {
            let loading_icon = Icon::new(IconName::ArrowCircle).with_rotate_animation(4);

            const ERROR_LABEL: &str = "Copilot Chat requires an active GitHub Copilot subscription. Please ensure Copilot is configured and try again, or use a different Assistant provider.";

            match &self.copilot_status {
                Some(status) => match status {
                    // Copilot is still booting: show a spinner.
                    Status::Starting { task: _ } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Starting Copilot…"))
                        .into_any_element(),
                    // A sign-in flow is in progress (prompt shown or awaited).
                    Status::SigningIn { prompt: _ }
                    | Status::SignedOut {
                        awaiting_signing_in: true,
                    } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Signing into Copilot…"))
                        .into_any_element(),
                    // Copilot failed to start: suggest restart/reinstall.
                    Status::Error(_) => {
                        const LABEL: &str = "Copilot had issues starting. Please try restarting it. If the issue persists, try reinstalling Copilot.";
                        v_flex()
                            .gap_6()
                            .child(Label::new(LABEL))
                            .child(svg().size_8().path(IconName::CopilotError.path()))
                            .into_any_element()
                    }
                    // Any other state (e.g. signed out, not mid-sign-in):
                    // offer the GitHub sign-in button.
                    _ => {
                        const LABEL: &str = "To use Zed's agent with GitHub Copilot, you need to be logged in to GitHub. Note that your GitHub account must have an active Copilot Chat subscription.";

                        v_flex()
                            .gap_2()
                            .child(Label::new(LABEL))
                            .child(
                                Button::new("sign_in", "Sign in to use GitHub Copilot")
                                    .full_width()
                                    .style(ButtonStyle::Outlined)
                                    .icon_color(Color::Muted)
                                    .icon(IconName::Github)
                                    .icon_position(IconPosition::Start)
                                    .icon_size(IconSize::Small)
                                    .on_click(|_, window, cx| {
                                        copilot::initiate_sign_in(window, cx)
                                    }),
                            )
                            .into_any_element()
                    }
                },
                // No global Copilot instance at all: explain the requirement.
                None => v_flex()
                    .gap_6()
                    .child(Label::new(ERROR_LABEL))
                    .into_any_element(),
            }
        }
    }
}