use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anyhow::{Result, anyhow};
use cloud_llm_client::CompletionIntent;
use collections::HashMap;
use copilot::copilot_chat::{
    ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, ImageUrl,
    Model as CopilotChatModel, ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool,
    ToolCall,
};
use copilot::{Copilot, Status};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{Action, AnyView, App, AsyncApp, Entity, Render, Subscription, Task, svg};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolChoice, LanguageModelToolResultContent,
    LanguageModelToolSchemaFormat, LanguageModelToolUse, MessageContent, RateLimiter, Role,
    StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::{CommonAnimationExt, prelude::*};
use util::debug_panic;

use crate::ui::ConfiguredApiCard;

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

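/// Language model provider backed by GitHub Copilot Chat. The available models
/// are whatever the global `CopilotChat` entity reports at runtime.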
pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

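/// Observable provider state: holds the subscriptions that keep this provider
/// in sync with the global `CopilotChat` entity and the settings store.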
pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = copilot::copilot_chat::CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::Copilot
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model is Copilot Chat's "base model", which is typically a
        // relatively fast model (e.g. 4o) and a sensible choice with respect to
        // premium request budgets.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        }

        let Some(copilot) = Copilot::global(cx) else {
            return Task::ready(Err(anyhow!(concat!(
                "Copilot must be enabled for Copilot Chat to work. ",
                "Please enable Copilot and try again."
            ))
            .into()));
        };

        let err = match copilot.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(format!(
                "Received the following error while signing into Copilot: {err}"
            )),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting. Please wait for Copilot to start, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, cx)).into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

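/// Flattens request messages into tiktoken-rs messages for token counting.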
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

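/// A single Copilot Chat model, with requests rate-limited to a small number
/// of concurrent completions.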
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all of its models,
            // irrespective of the underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        if self.model.supports_response() {
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

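/// Maps a stream of Chat Completions `ResponseEvent`s into
/// `LanguageModelCompletionEvent`s, accumulating partial tool calls by index
/// until a `tool_calls` finish reason flushes them.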
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| {
                                        // The model can output an empty string
                                        // to indicate the absence of arguments.
                                        // When that happens, create an empty
                                        // object instead.
                                        let arguments = if tool_call.arguments.is_empty() {
                                            Ok(serde_json::Value::Object(Default::default()))
                                        } else {
                                            serde_json::Value::from_str(&tool_call.arguments)
                                        };
                                        match arguments {
                                            Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                                LanguageModelToolUse {
                                                    id: tool_call.id.into(),
                                                    name: tool_call.name.as_str().into(),
                                                    is_input_complete: true,
                                                    input,
                                                    raw_input: tool_call.arguments,
                                                    thought_signature: None,
                                                },
                                            )),
                                            Err(error) => Ok(
                                                LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                    id: tool_call.id.into(),
                                                    tool_name: tool_call.name.as_str().into(),
                                                    raw_input: tool_call.arguments.into(),
                                                    json_parse_error: error.to_string(),
                                                },
                                            ),
                                        }
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

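/// Maps Copilot Responses API stream events into `LanguageModelCompletionEvent`s,
/// remembering whether a tool-use stop was already emitted so that a subsequent
/// `Completed` event does not produce a duplicate `Stop`.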
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot::copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot::copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot::copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot::copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot::copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot::copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    ..
                } => {
                    let mut events = Vec::new();
                    match serde_json::from_str::<serde_json::Value>(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                                thought_signature: None,
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot::copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot::copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot::copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot::copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot::copilot_responses::IncompleteReason::ContentFilter) => {
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot::copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot::copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!(format!("{error:?}"))),
            )],

            copilot::copilot_responses::StreamEvent::Created { .. }
            | copilot::copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

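/// Converts a `LanguageModelRequest` into a Copilot Chat Completions request,
/// merging consecutive same-role messages and dropping content (such as images)
/// that the target model cannot accept.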
fn into_copilot_chat(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: copilot::copilot_chat::ToolCallContent::Function {
                                function: copilot::copilot_chat::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: copilot::copilot_chat::Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => copilot::copilot_chat::ToolChoice::Auto,
            LanguageModelToolChoice::Any => copilot::copilot_chat::ToolChoice::Any,
            LanguageModelToolChoice::None => copilot::copilot_chat::ToolChoice::None,
        }),
    })
}

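/// Converts a `LanguageModelRequest` into a Copilot Responses API request,
/// emitting tool outputs, function calls, and reasoning items as separate
/// input items.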
fn into_copilot_responses(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> copilot::copilot_responses::Request {
    use copilot::copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // Setting this would require plumbing a value through from user settings.
        include: Some(vec![
            copilot::copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot::copilot_responses as responses;
    use futures::StreamExt;

    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }
}
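
/// Configuration UI for the provider: shows authorization state and offers a
/// sign-in button when the user is not yet signed in to GitHub Copilot.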
struct ConfigurationView {
    copilot_status: Option<copilot::Status>,
    state: Entity<State>,
    _subscription: Option<Subscription>,
}

impl ConfigurationView {
    pub fn new(state: Entity<State>, cx: &mut Context<Self>) -> Self {
        let copilot = Copilot::global(cx);

        Self {
            copilot_status: copilot.as_ref().map(|copilot| copilot.read(cx).status()),
            state,
            _subscription: copilot.as_ref().map(|copilot| {
                cx.observe(copilot, |this, model, cx| {
                    this.copilot_status = Some(model.read(cx).status());
                    cx.notify();
                })
            }),
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        if self.state.read(cx).is_authenticated(cx) {
            ConfiguredApiCard::new("Authorized")
                .button_label("Sign Out")
                .on_click(|_, window, cx| {
                    window.dispatch_action(copilot::SignOut.boxed_clone(), cx);
                })
                .into_any_element()
        } else {
            let loading_icon = Icon::new(IconName::ArrowCircle).with_rotate_animation(4);

            const ERROR_LABEL: &str = "Copilot Chat requires an active GitHub Copilot subscription. Please ensure Copilot is configured and try again, or use a different Assistant provider.";

            match &self.copilot_status {
                Some(status) => match status {
                    Status::Starting { task: _ } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Starting Copilot…"))
                        .into_any_element(),
                    Status::SigningIn { prompt: _ }
                    | Status::SignedOut {
                        awaiting_signing_in: true,
                    } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Signing into Copilot…"))
                        .into_any_element(),
                    Status::Error(_) => {
                        const LABEL: &str = "Copilot had issues starting. Please try restarting it. If the issue persists, try reinstalling Copilot.";
                        v_flex()
                            .gap_6()
                            .child(Label::new(LABEL))
                            .child(svg().size_8().path(IconName::CopilotError.path()))
                            .into_any_element()
                    }
                    _ => {
                        const LABEL: &str = "To use Zed's agent with GitHub Copilot, you need to be logged in to GitHub. Note that your GitHub account must have an active Copilot Chat subscription.";

                        v_flex()
                            .gap_2()
                            .child(Label::new(LABEL))
                            .child(
                                Button::new("sign_in", "Sign in to use GitHub Copilot")
                                    .full_width()
                                    .style(ButtonStyle::Outlined)
                                    .icon_color(Color::Muted)
                                    .icon(IconName::Github)
                                    .icon_position(IconPosition::Start)
                                    .icon_size(IconSize::Small)
                                    .on_click(|_, window, cx| {
                                        copilot::initiate_sign_in(window, cx)
                                    }),
                            )
                            .into_any_element()
                    }
                },
                None => v_flex()
                    .gap_6()
                    .child(Label::new(ERROR_LABEL))
                    .into_any_element(),
            }
        }
    }
}