use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anyhow::{Result, anyhow};
use cloud_llm_client::CompletionIntent;
use collections::HashMap;
use copilot::copilot_chat::{
    ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, ImageUrl,
    Model as CopilotChatModel, ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool,
    ToolCall,
};
use copilot::{Copilot, Status};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{Action, AnyView, App, AsyncApp, Entity, Render, Subscription, Task, svg};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolChoice, LanguageModelToolResultContent,
    LanguageModelToolSchemaFormat, LanguageModelToolUse, MessageContent, RateLimiter, Role,
    StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::{CommonAnimationExt, prelude::*};
use util::debug_panic;

use crate::ui::ConfiguredApiCard;

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

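/// A [`LanguageModelProvider`] backed by GitHub Copilot Chat.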
pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = copilot::copilot_chat::CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::Copilot
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model should be Copilot Chat's "base model", which is typically a
        // relatively fast model (e.g. 4o) and a sensible choice when considering premium
        // requests.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        }

        let Some(copilot) = Copilot::global(cx) else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(
                "Received the following error while signing into Copilot: {err}"
            ),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting; please wait for Copilot to start, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, cx)).into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

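/// Flattens request messages into the shape `tiktoken_rs` expects when counting tokens.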
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

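/// A single Copilot Chat model exposed through the [`LanguageModel`] trait, with concurrent
/// requests capped by a rate limiter.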
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all its models, irrespective of
            // the underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        if self.model.supports_response() {
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

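/// Maps raw Copilot Chat completion events to [`LanguageModelCompletionEvent`]s, accumulating
/// streamed tool-call fragments by index until a `tool_calls` finish reason flushes them.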
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
        thought_signature: Option<String>,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }

                                if let Some(thought_signature) = function.thought_signature.clone()
                                {
                                    entry.thought_signature = Some(thought_signature);
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| {
                                        // The model can output an empty string to indicate the
                                        // absence of arguments. When that happens, create an
                                        // empty object instead.
                                        let arguments = if tool_call.arguments.is_empty() {
                                            Ok(serde_json::Value::Object(Default::default()))
                                        } else {
                                            serde_json::Value::from_str(&tool_call.arguments)
                                        };
                                        match arguments {
                                            Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                                LanguageModelToolUse {
                                                    id: tool_call.id.into(),
                                                    name: tool_call.name.as_str().into(),
                                                    is_input_complete: true,
                                                    input,
                                                    raw_input: tool_call.arguments,
                                                    thought_signature: tool_call.thought_signature,
                                                },
                                            )),
                                            Err(error) => Ok(
                                                LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                    id: tool_call.id.into(),
                                                    tool_name: tool_call.name.as_str().into(),
                                                    raw_input: tool_call.arguments.into(),
                                                    json_parse_error: error.to_string(),
                                                },
                                            ),
                                        }
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

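/// Maps Copilot Responses API stream events to [`LanguageModelCompletionEvent`]s, remembering
/// when a tool-use stop has already been emitted so `Completed` does not duplicate it.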
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot::copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot::copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot::copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot::copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot::copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot::copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    thought_signature,
                    ..
                } => {
                    let mut events = Vec::new();
                    match serde_json::from_str::<serde_json::Value>(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot::copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot::copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot::copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot::copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot::copilot_responses::IncompleteReason::ContentFilter) => {
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot::copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot::copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!("{error:?}")),
            )],

            copilot::copilot_responses::StreamEvent::Created { .. }
            | copilot::copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

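/// Converts a [`LanguageModelRequest`] into a Copilot Chat request, merging consecutive
/// messages that share a role and dropping content the model cannot accept (e.g. images for
/// non-vision models).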
fn into_copilot_chat(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: copilot::copilot_chat::ToolCallContent::Function {
                                function: copilot::copilot_chat::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                    thought_signature: tool_use.thought_signature.clone(),
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: copilot::copilot_chat::Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => copilot::copilot_chat::ToolChoice::Auto,
            LanguageModelToolChoice::Any => copilot::copilot_chat::ToolChoice::Any,
            LanguageModelToolChoice::None => copilot::copilot_chat::ToolChoice::None,
        }),
    })
}

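/// Converts a [`LanguageModelRequest`] into a Copilot Responses API request, emitting tool
/// outputs, function calls, reasoning items, and role-tagged messages as separate input items.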
fn into_copilot_responses(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> copilot::copilot_responses::Request {
    use copilot::copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                            thought_signature: tool_use.thought_signature.clone(),
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // We would need to add support for setting this from user settings.
        include: Some(vec![
            copilot::copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot::copilot_responses as responses;
    use futures::StreamExt;

    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

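    // The mapper drops empty text deltas (see the `OutputTextDelta` arm above), so a stream
    // containing only an empty delta should produce no events.
    #[test]
    fn responses_stream_skips_empty_text_deltas() {
        let events = vec![responses::StreamEvent::OutputTextDelta {
            item_id: "msg_1".into(),
            output_index: 0,
            delta: String::new(),
        }];

        let mapped = map_events(events);
        assert!(mapped.is_empty(), "empty deltas should produce no events");
    }
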
    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
                thought_signature: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

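    // A reasoning item may carry encrypted content without any summary; per the `Reasoning`
    // arm of the mapper, this should yield only a RedactedThinking event.
    #[test]
    fn responses_stream_maps_reasoning_encrypted_content_only() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: None,
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert_eq!(mapped.len(), 1);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }
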
    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                    thought_signature: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }
}

struct ConfigurationView {
    copilot_status: Option<copilot::Status>,
    state: Entity<State>,
    _subscription: Option<Subscription>,
}

impl ConfigurationView {
    pub fn new(state: Entity<State>, cx: &mut Context<Self>) -> Self {
        let copilot = Copilot::global(cx);

        Self {
            copilot_status: copilot.as_ref().map(|copilot| copilot.read(cx).status()),
            state,
            _subscription: copilot.as_ref().map(|copilot| {
                cx.observe(copilot, |this, model, cx| {
                    this.copilot_status = Some(model.read(cx).status());
                    cx.notify();
                })
            }),
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        if self.state.read(cx).is_authenticated(cx) {
            ConfiguredApiCard::new("Authorized")
                .button_label("Sign Out")
                .on_click(|_, window, cx| {
                    window.dispatch_action(copilot::SignOut.boxed_clone(), cx);
                })
                .into_any_element()
        } else {
            let loading_icon = Icon::new(IconName::ArrowCircle).with_rotate_animation(4);

            const ERROR_LABEL: &str = "Copilot Chat requires an active GitHub Copilot subscription. Please ensure Copilot is configured and try again, or use a different Assistant provider.";

            match &self.copilot_status {
                Some(status) => match status {
                    Status::Starting { task: _ } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Starting Copilot…"))
                        .into_any_element(),
                    Status::SigningIn { prompt: _ }
                    | Status::SignedOut {
                        awaiting_signing_in: true,
                    } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Signing into Copilot…"))
                        .into_any_element(),
                    Status::Error(_) => {
                        const LABEL: &str = "Copilot had issues starting. Please try restarting it. If the issue persists, try reinstalling Copilot.";
                        v_flex()
                            .gap_6()
                            .child(Label::new(LABEL))
                            .child(svg().size_8().path(IconName::CopilotError.path()))
                            .into_any_element()
                    }
                    _ => {
                        const LABEL: &str = "To use Zed's agent with GitHub Copilot, you need to be logged in to GitHub. Note that your GitHub account must have an active Copilot Chat subscription.";

                        v_flex()
                            .gap_2()
                            .child(Label::new(LABEL))
                            .child(
                                Button::new("sign_in", "Sign in to use GitHub Copilot")
                                    .full_width()
                                    .style(ButtonStyle::Outlined)
                                    .icon_color(Color::Muted)
                                    .icon(IconName::Github)
                                    .icon_position(IconPosition::Start)
                                    .icon_size(IconSize::Small)
                                    .on_click(|_, window, cx| {
                                        copilot::initiate_sign_in(window, cx)
                                    }),
                            )
                            .into_any_element()
                    }
                },
                None => v_flex()
                    .gap_6()
                    .child(Label::new(ERROR_LABEL))
                    .into_any_element(),
            }
        }
    }
}