use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;

use anyhow::{Result, anyhow};
use cloud_llm_client::CompletionIntent;
use collections::HashMap;
use copilot::copilot_chat::{
    ChatMessage, ChatMessageContent, ChatMessagePart, CopilotChat, ImageUrl,
    Model as CopilotChatModel, ModelVendor, Request as CopilotChatRequest, ResponseEvent, Tool,
    ToolCall,
};
use copilot::{Copilot, Status};
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt};
use gpui::{Action, AnyView, App, AsyncApp, Entity, Render, Subscription, Task, svg};
use http_client::StatusCode;
use language::language_settings::all_language_settings;
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelRequestMessage, LanguageModelToolChoice, LanguageModelToolResultContent,
    LanguageModelToolSchemaFormat, LanguageModelToolUse, MessageContent, RateLimiter, Role,
    StopReason, TokenUsage,
};
use settings::SettingsStore;
use ui::{CommonAnimationExt, prelude::*};
use util::debug_panic;

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("copilot_chat");
const PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("GitHub Copilot Chat");

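/// Registers GitHub Copilot Chat as a language model provider, exposing the
/// models reported by the global [`CopilotChat`] entity.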
pub struct CopilotChatLanguageModelProvider {
    state: Entity<State>,
}

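/// Provider state: subscriptions that keep this provider notified when the
/// global [`CopilotChat`] entity or the settings store changes.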
pub struct State {
    _copilot_chat_subscription: Option<Subscription>,
    _settings_subscription: Subscription,
}

impl State {
    fn is_authenticated(&self, cx: &App) -> bool {
        CopilotChat::global(cx)
            .map(|m| m.read(cx).is_authenticated())
            .unwrap_or(false)
    }
}

impl CopilotChatLanguageModelProvider {
    pub fn new(cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            let copilot_chat_subscription = CopilotChat::global(cx)
                .map(|copilot_chat| cx.observe(&copilot_chat, |_, _, cx| cx.notify()));
            State {
                _copilot_chat_subscription: copilot_chat_subscription,
                _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                    if let Some(copilot_chat) = CopilotChat::global(cx) {
                        let language_settings = all_language_settings(None, cx);
                        let configuration = copilot::copilot_chat::CopilotChatConfiguration {
                            enterprise_uri: language_settings
                                .edit_predictions
                                .copilot
                                .enterprise_uri
                                .clone(),
                        };
                        copilot_chat.update(cx, |chat, cx| {
                            chat.set_configuration(configuration, cx);
                        });
                    }
                    cx.notify();
                }),
            }
        });

        Self { state }
    }

    fn create_language_model(&self, model: CopilotChatModel) -> Arc<dyn LanguageModel> {
        Arc::new(CopilotChatLanguageModel {
            model,
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CopilotChatLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CopilotChatLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::Copilot
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let models = CopilotChat::global(cx).and_then(|m| m.read(cx).models())?;
        models
            .first()
            .map(|model| self.create_language_model(model.clone()))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        // The default model is Copilot Chat's "base model", which is likely a relatively
        // fast model (e.g. 4o) and a sensible choice when considering premium requests.
        self.default_model(cx)
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let Some(models) = CopilotChat::global(cx).and_then(|m| m.read(cx).models()) else {
            return Vec::new();
        };
        models
            .iter()
            .map(|model| self.create_language_model(model.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated(cx)
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated(cx) {
            return Task::ready(Ok(()));
        }

        let Some(copilot) = Copilot::global(cx) else {
            return Task::ready(Err(anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            )
            .into()));
        };

        let err = match copilot.read(cx).status() {
            Status::Authorized => return Task::ready(Ok(())),
            Status::Disabled => anyhow!(
                "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again."
            ),
            Status::Error(err) => anyhow!(
                "Received the following error while signing into Copilot: {err}"
            ),
            Status::Starting { task: _ } => anyhow!(
                "Copilot is still starting; please wait for Copilot to start, then try again."
            ),
            Status::Unauthorized => anyhow!(
                "Unable to authorize with Copilot. Please make sure that you have an active Copilot and Copilot Chat subscription."
            ),
            Status::SignedOut { .. } => {
                anyhow!("You have signed out of Copilot. Please sign in to Copilot and try again.")
            }
            Status::SigningIn { prompt: _ } => anyhow!("Still signing into Copilot..."),
        };

        Task::ready(Err(err.into()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, cx)).into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Err(anyhow!(
            "Signing out of GitHub Copilot Chat is currently not supported."
        )))
    }
}

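/// Flattens request messages into the shape `tiktoken_rs` expects for token
/// counting.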
fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

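/// A single Copilot Chat model, adapted to the [`LanguageModel`] interface and
/// rate-limited to a handful of concurrent requests.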
pub struct CopilotChatLanguageModel {
    model: CopilotChatModel,
    request_limiter: RateLimiter,
}

impl LanguageModel for CopilotChatLanguageModel {
    fn id(&self) -> LanguageModelId {
        LanguageModelId::from(self.model.id().to_string())
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools()
    }

    fn supports_images(&self) -> bool {
        self.model.supports_vision()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.vendor() {
            ModelVendor::OpenAI | ModelVendor::Anthropic => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            ModelVendor::Google | ModelVendor::XAI | ModelVendor::Unknown => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => self.supports_tools(),
        }
    }

    fn telemetry_id(&self) -> String {
        format!("copilot_chat/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        let model = self.model.clone();
        cx.background_spawn(async move {
            let messages = collect_tiktoken_messages(request);
            // Copilot uses the OpenAI tiktoken tokenizer for all of its models,
            // irrespective of the underlying provider (vendor).
            let tokenizer_model = match model.tokenizer() {
                Some("o200k_base") => "gpt-4o",
                Some("cl100k_base") => "gpt-4",
                _ => "gpt-4o",
            };

            tiktoken_rs::num_tokens_from_messages(tokenizer_model, &messages)
                .map(|tokens| tokens as u64)
        })
        .boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        // Intents triggered directly by the user count as user-initiated.
        let is_user_initiated = request.intent.is_none_or(|intent| match intent {
            CompletionIntent::UserPrompt
            | CompletionIntent::ThreadContextSummarization
            | CompletionIntent::InlineAssist
            | CompletionIntent::TerminalInlineAssist
            | CompletionIntent::GenerateGitCommitMessage => true,

            CompletionIntent::ToolResults
            | CompletionIntent::ThreadSummarization
            | CompletionIntent::CreateFile
            | CompletionIntent::EditFile => false,
        });

        // Prefer the Responses API when the model supports it; otherwise fall
        // back to the Chat Completions API below.
        if self.model.supports_response() {
            let responses_request = into_copilot_responses(&self.model, request);
            let request_limiter = self.request_limiter.clone();
            let future = cx.spawn(async move |cx| {
                let request =
                    CopilotChat::stream_response(responses_request, is_user_initiated, cx.clone());
                request_limiter
                    .stream(async move {
                        let stream = request.await?;
                        let mapper = CopilotResponsesEventMapper::new();
                        Ok(mapper.map_stream(stream).boxed())
                    })
                    .await
            });
            return async move { Ok(future.await?.boxed()) }.boxed();
        }

        let copilot_request = match into_copilot_chat(&self.model, request) {
            Ok(request) => request,
            Err(err) => return futures::future::ready(Err(err.into())).boxed(),
        };
        let is_streaming = copilot_request.stream;

        let request_limiter = self.request_limiter.clone();
        let future = cx.spawn(async move |cx| {
            let request =
                CopilotChat::stream_completion(copilot_request, is_user_initiated, cx.clone());
            request_limiter
                .stream(async move {
                    let response = request.await?;
                    Ok(map_to_language_model_completion_events(
                        response,
                        is_streaming,
                    ))
                })
                .await
        });
        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

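/// Maps a Copilot Chat Completions event stream onto Zed's
/// [`LanguageModelCompletionEvent`] stream, accumulating partial tool calls by
/// index until a `tool_calls` finish reason arrives.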
pub fn map_to_language_model_completion_events(
    events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
    is_streaming: bool,
) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
    #[derive(Default)]
    struct RawToolCall {
        id: String,
        name: String,
        arguments: String,
    }

    struct State {
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
        tool_calls_by_index: HashMap<usize, RawToolCall>,
    }

    futures::stream::unfold(
        State {
            events,
            tool_calls_by_index: HashMap::default(),
        },
        move |mut state| async move {
            if let Some(event) = state.events.next().await {
                match event {
                    Ok(event) => {
                        let Some(choice) = event.choices.first() else {
                            return Some((
                                vec![Err(anyhow!("Response contained no choices").into())],
                                state,
                            ));
                        };

                        // In streaming mode each chunk carries a `delta`; in
                        // non-streaming mode the whole turn arrives as `message`.
                        let delta = if is_streaming {
                            choice.delta.as_ref()
                        } else {
                            choice.message.as_ref()
                        };

                        let Some(delta) = delta else {
                            return Some((
                                vec![Err(anyhow!("Response contained no delta").into())],
                                state,
                            ));
                        };

                        let mut events = Vec::new();
                        if let Some(content) = delta.content.clone() {
                            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                        }

                        for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                            let tool_index = tool_call.index.unwrap_or(index);
                            let entry = state.tool_calls_by_index.entry(tool_index).or_default();

                            if let Some(tool_id) = tool_call.id.clone() {
                                entry.id = tool_id;
                            }

                            if let Some(function) = tool_call.function.as_ref() {
                                if let Some(name) = function.name.clone() {
                                    entry.name = name;
                                }

                                if let Some(arguments) = function.arguments.clone() {
                                    entry.arguments.push_str(&arguments);
                                }
                            }
                        }

                        if let Some(usage) = event.usage {
                            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                                TokenUsage {
                                    input_tokens: usage.prompt_tokens,
                                    output_tokens: usage.completion_tokens,
                                    cache_creation_input_tokens: 0,
                                    cache_read_input_tokens: 0,
                                },
                            )));
                        }

                        match choice.finish_reason.as_deref() {
                            Some("stop") => {
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            Some("tool_calls") => {
                                events.extend(state.tool_calls_by_index.drain().map(
                                    |(_, tool_call)| {
                                        // The model can output an empty string
                                        // to indicate the absence of arguments.
                                        // When that happens, create an empty
                                        // object instead.
                                        let arguments = if tool_call.arguments.is_empty() {
                                            Ok(serde_json::Value::Object(Default::default()))
                                        } else {
                                            serde_json::Value::from_str(&tool_call.arguments)
                                        };
                                        match arguments {
                                            Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                                                LanguageModelToolUse {
                                                    id: tool_call.id.into(),
                                                    name: tool_call.name.as_str().into(),
                                                    is_input_complete: true,
                                                    input,
                                                    raw_input: tool_call.arguments,
                                                },
                                            )),
                                            Err(error) => Ok(
                                                LanguageModelCompletionEvent::ToolUseJsonParseError {
                                                    id: tool_call.id.into(),
                                                    tool_name: tool_call.name.as_str().into(),
                                                    raw_input: tool_call.arguments.into(),
                                                    json_parse_error: error.to_string(),
                                                },
                                            ),
                                        }
                                    },
                                ));

                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::ToolUse,
                                )));
                            }
                            Some(stop_reason) => {
                                log::error!("Unexpected Copilot Chat stop_reason: {stop_reason:?}");
                                events.push(Ok(LanguageModelCompletionEvent::Stop(
                                    StopReason::EndTurn,
                                )));
                            }
                            None => {}
                        }

                        return Some((events, state));
                    }
                    Err(err) => return Some((vec![Err(anyhow!(err).into())], state)),
                }
            }

            None
        },
    )
    .flat_map(futures::stream::iter)
}

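/// Maps Copilot Responses API stream events onto Zed's completion events,
/// remembering whether a tool-use stop was already emitted so `Completed`
/// does not produce a duplicate stop.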
pub struct CopilotResponsesEventMapper {
    pending_stop_reason: Option<StopReason>,
}

impl CopilotResponsesEventMapper {
    pub fn new() -> Self {
        Self {
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<copilot::copilot_responses::StreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: copilot::copilot_responses::StreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            copilot::copilot_responses::StreamEvent::OutputItemAdded { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { id, .. } => {
                    vec![Ok(LanguageModelCompletionEvent::StartMessage {
                        message_id: id,
                    })]
                }
                _ => Vec::new(),
            },

            copilot::copilot_responses::StreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }

            copilot::copilot_responses::StreamEvent::OutputItemDone { item, .. } => match item {
                copilot::copilot_responses::ResponseOutputItem::Message { .. } => Vec::new(),
                copilot::copilot_responses::ResponseOutputItem::FunctionCall {
                    call_id,
                    name,
                    arguments,
                    ..
                } => {
                    let mut events = Vec::new();
                    match serde_json::from_str::<serde_json::Value>(&arguments) {
                        Ok(input) => events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: call_id.into(),
                                name: name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: arguments.clone(),
                            },
                        ))),
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: call_id.into(),
                                tool_name: name.as_str().into(),
                                raw_input: arguments.clone().into(),
                                json_parse_error: error.to_string(),
                            }))
                        }
                    }
                    // Record that we already emitted a tool-use stop so we can avoid duplicating
                    // a Stop event on Completed.
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
                    events
                }
                copilot::copilot_responses::ResponseOutputItem::Reasoning {
                    summary,
                    encrypted_content,
                    ..
                } => {
                    let mut events = Vec::new();

                    if let Some(blocks) = summary {
                        let mut text = String::new();
                        for block in blocks {
                            text.push_str(&block.text);
                        }
                        if !text.is_empty() {
                            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                                text,
                                signature: None,
                            }));
                        }
                    }

                    if let Some(data) = encrypted_content {
                        events.push(Ok(LanguageModelCompletionEvent::RedactedThinking { data }));
                    }

                    events
                }
            },

            copilot::copilot_responses::StreamEvent::Completed { response } => {
                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                if self.pending_stop_reason.take() != Some(StopReason::ToolUse) {
                    events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
                }
                events
            }

            copilot::copilot_responses::StreamEvent::Incomplete { response } => {
                let reason = response
                    .incomplete_details
                    .as_ref()
                    .and_then(|details| details.reason.as_ref());
                let stop_reason = match reason {
                    Some(copilot::copilot_responses::IncompleteReason::MaxOutputTokens) => {
                        StopReason::MaxTokens
                    }
                    Some(copilot::copilot_responses::IncompleteReason::ContentFilter) => {
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if let Some(usage) = response.usage {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                        input_tokens: usage.input_tokens.unwrap_or(0),
                        output_tokens: usage.output_tokens.unwrap_or(0),
                        cache_creation_input_tokens: 0,
                        cache_read_input_tokens: 0,
                    })));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }

            copilot::copilot_responses::StreamEvent::Failed { response } => {
                let provider = PROVIDER_NAME;
                let (status_code, message) = match response.error {
                    Some(error) => {
                        let status_code = StatusCode::from_str(&error.code)
                            .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
                        (status_code, error.message)
                    }
                    None => (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "response.failed".to_string(),
                    ),
                };
                vec![Err(LanguageModelCompletionError::HttpResponseError {
                    provider,
                    status_code,
                    message,
                })]
            }

            copilot::copilot_responses::StreamEvent::GenericError { error } => vec![Err(
                LanguageModelCompletionError::Other(anyhow!("{error:?}")),
            )],

            copilot::copilot_responses::StreamEvent::Created { .. }
            | copilot::copilot_responses::StreamEvent::Unknown => Vec::new(),
        }
    }
}

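/// Converts a [`LanguageModelRequest`] into a Copilot Chat Completions
/// request, merging consecutive same-role messages and emitting tool results
/// as dedicated tool messages.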
fn into_copilot_chat(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> Result<CopilotChatRequest> {
    let mut request_messages: Vec<LanguageModelRequestMessage> = Vec::new();
    for message in request.messages {
        if let Some(last_message) = request_messages.last_mut() {
            if last_message.role == message.role {
                last_message.content.extend(message.content);
            } else {
                request_messages.push(message);
            }
        } else {
            request_messages.push(message);
        }
    }

    let mut messages: Vec<ChatMessage> = Vec::new();
    for message in request_messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => text.to_string().into(),
                            LanguageModelToolResultContent::Image(image) => {
                                if model.supports_vision() {
                                    ChatMessageContent::Multipart(vec![ChatMessagePart::Image {
                                        image_url: ImageUrl {
                                            url: image.to_base64_url(),
                                        },
                                    }])
                                } else {
                                    debug_panic!(
                                        "This should be caught at {} level",
                                        tool_result.tool_name
                                    );
                                    "[Tool responded with an image, but this model does not support vision]".to_string().into()
                                }
                            }
                        };

                        messages.push(ChatMessage::Tool {
                            tool_call_id: tool_result.tool_use_id.to_string(),
                            content,
                        });
                    }
                }

                let mut content_parts = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. }
                            if !text.is_empty() =>
                        {
                            if let Some(ChatMessagePart::Text { text: text_content }) =
                                content_parts.last_mut()
                            {
                                text_content.push_str(text);
                            } else {
                                content_parts.push(ChatMessagePart::Text {
                                    text: text.to_string(),
                                });
                            }
                        }
                        MessageContent::Image(image) if model.supports_vision() => {
                            content_parts.push(ChatMessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                },
                            });
                        }
                        _ => {}
                    }
                }

                if !content_parts.is_empty() {
                    messages.push(ChatMessage::User {
                        content: content_parts.into(),
                    });
                }
            }
            Role::Assistant => {
                let mut tool_calls = Vec::new();
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        tool_calls.push(ToolCall {
                            id: tool_use.id.to_string(),
                            content: copilot::copilot_chat::ToolCallContent::Function {
                                function: copilot::copilot_chat::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)?,
                                },
                            },
                        });
                    }
                }

                let text_content = {
                    let mut buffer = String::new();
                    for string in message.content.iter().filter_map(|content| match content {
                        MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                            Some(text.as_str())
                        }
                        MessageContent::ToolUse(_)
                        | MessageContent::RedactedThinking(_)
                        | MessageContent::ToolResult(_)
                        | MessageContent::Image(_) => None,
                    }) {
                        buffer.push_str(string);
                    }

                    buffer
                };

                messages.push(ChatMessage::Assistant {
                    content: if text_content.is_empty() {
                        ChatMessageContent::empty()
                    } else {
                        text_content.into()
                    },
                    tool_calls,
                });
            }
            Role::System => messages.push(ChatMessage::System {
                content: message.string_contents(),
            }),
        }
    }

    let tools = request
        .tools
        .iter()
        .map(|tool| Tool::Function {
            function: copilot::copilot_chat::Function {
                name: tool.name.clone(),
                description: tool.description.clone(),
                parameters: tool.input_schema.clone(),
            },
        })
        .collect::<Vec<_>>();

    Ok(CopilotChatRequest {
        intent: true,
        n: 1,
        stream: model.uses_streaming(),
        temperature: 0.1,
        model: model.id().to_string(),
        messages,
        tools,
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => copilot::copilot_chat::ToolChoice::Auto,
            LanguageModelToolChoice::Any => copilot::copilot_chat::ToolChoice::Any,
            LanguageModelToolChoice::None => copilot::copilot_chat::ToolChoice::None,
        }),
    })
}

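/// Converts a [`LanguageModelRequest`] into a Copilot Responses API request,
/// translating tool calls, tool results, and redacted thinking into response
/// input items.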
fn into_copilot_responses(
    model: &copilot::copilot_chat::Model,
    request: LanguageModelRequest,
) -> copilot::copilot_responses::Request {
    use copilot::copilot_responses as responses;

    let LanguageModelRequest {
        thread_id: _,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items: Vec<responses::ResponseInputItem> = Vec::new();

    for message in messages {
        match message.role {
            Role::User => {
                for content in &message.content {
                    if let MessageContent::ToolResult(tool_result) = content {
                        let output = if let Some(out) = &tool_result.output {
                            match out {
                                serde_json::Value::String(s) => {
                                    responses::ResponseFunctionOutput::Text(s.clone())
                                }
                                serde_json::Value::Null => {
                                    responses::ResponseFunctionOutput::Text(String::new())
                                }
                                other => responses::ResponseFunctionOutput::Text(other.to_string()),
                            }
                        } else {
                            match &tool_result.content {
                                LanguageModelToolResultContent::Text(text) => {
                                    responses::ResponseFunctionOutput::Text(text.to_string())
                                }
                                LanguageModelToolResultContent::Image(image) => {
                                    if model.supports_vision() {
                                        responses::ResponseFunctionOutput::Content(vec![
                                            responses::ResponseInputContent::InputImage {
                                                image_url: Some(image.to_base64_url()),
                                                detail: Default::default(),
                                            },
                                        ])
                                    } else {
                                        debug_panic!(
                                            "This should be caught at {} level",
                                            tool_result.tool_name
                                        );
                                        responses::ResponseFunctionOutput::Text(
                                            "[Tool responded with an image, but this model does not support vision]".into(),
                                        )
                                    }
                                }
                            }
                        };

                        input_items.push(responses::ResponseInputItem::FunctionCallOutput {
                            call_id: tool_result.tool_use_id.to_string(),
                            output,
                            status: None,
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::InputText {
                                text: text.clone(),
                            });
                        }

                        MessageContent::Image(image) => {
                            if model.supports_vision() {
                                parts.push(responses::ResponseInputContent::InputImage {
                                    image_url: Some(image.to_base64_url()),
                                    detail: Default::default(),
                                });
                            }
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "user".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }

            Role::Assistant => {
                for content in &message.content {
                    if let MessageContent::ToolUse(tool_use) = content {
                        input_items.push(responses::ResponseInputItem::FunctionCall {
                            call_id: tool_use.id.to_string(),
                            name: tool_use.name.to_string(),
                            arguments: tool_use.raw_input.clone(),
                            status: None,
                        });
                    }
                }

                for content in &message.content {
                    if let MessageContent::RedactedThinking(data) = content {
                        input_items.push(responses::ResponseInputItem::Reasoning {
                            id: None,
                            summary: Vec::new(),
                            encrypted_content: data.clone(),
                        });
                    }
                }

                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    match content {
                        MessageContent::Text(text) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: text.clone(),
                            });
                        }
                        MessageContent::Image(_) => {
                            parts.push(responses::ResponseInputContent::OutputText {
                                text: "[image omitted]".to_string(),
                            });
                        }
                        _ => {}
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "assistant".into(),
                        content: Some(parts),
                        status: Some("completed".into()),
                    });
                }
            }

            Role::System => {
                let mut parts: Vec<responses::ResponseInputContent> = Vec::new();
                for content in &message.content {
                    if let MessageContent::Text(text) = content {
                        parts.push(responses::ResponseInputContent::InputText {
                            text: text.clone(),
                        });
                    }
                }

                if !parts.is_empty() {
                    input_items.push(responses::ResponseInputItem::Message {
                        role: "system".into(),
                        content: Some(parts),
                        status: None,
                    });
                }
            }
        }
    }

    let converted_tools: Vec<responses::ToolDefinition> = tools
        .into_iter()
        .map(|tool| responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    let mapped_tool_choice = tool_choice.map(|choice| match choice {
        LanguageModelToolChoice::Auto => responses::ToolChoice::Auto,
        LanguageModelToolChoice::Any => responses::ToolChoice::Any,
        LanguageModelToolChoice::None => responses::ToolChoice::None,
    });

    responses::Request {
        model: model.id().to_string(),
        input: input_items,
        stream: model.uses_streaming(),
        temperature,
        tools: converted_tools,
        tool_choice: mapped_tool_choice,
        reasoning: None, // Configuring reasoning would require plumbing through a user setting.
        include: Some(vec![
            copilot::copilot_responses::ResponseIncludable::ReasoningEncryptedContent,
        ]),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use copilot::copilot_responses as responses;
    use futures::StreamExt;

    fn map_events(events: Vec<responses::StreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            responses::StreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::Message {
                    id: "msg_1".into(),
                    role: "assistant".into(),
                    content: Some(Vec::new()),
                },
            },
            responses::StreamEvent::OutputTextDelta {
                item_id: "msg_1".into(),
                output_index: 0,
                delta: "Hello".into(),
            },
            responses::StreamEvent::Completed {
                response: responses::Response {
                    usage: Some(responses::ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_1"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{\"x\":1}".into(),
                status: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(ref use_) if use_.id.to_string() == "call_1" && use_.name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::FunctionCall {
                id: Some("fn_1".into()),
                call_id: "call_1".into(),
                name: "do_it".into(),
                arguments: "{not json}".into(),
                status: None,
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError { ref id, ref tool_name, .. }
                if id.to_string() == "call_1" && tool_name.as_ref() == "do_it"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_maps_reasoning_summary_and_encrypted_content() {
        let events = vec![responses::StreamEvent::OutputItemDone {
            output_index: 0,
            sequence_number: None,
            item: responses::ResponseOutputItem::Reasoning {
                id: "r1".into(),
                summary: Some(vec![responses::ResponseReasoningItem {
                    kind: "summary_text".into(),
                    text: "Chain".into(),
                }]),
                encrypted_content: Some("ENC".into()),
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::Thinking { ref text, signature: None } if text == "Chain"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::RedactedThinking { ref data } if data == "ENC"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_max_tokens() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: Some(responses::ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(0),
                    total_tokens: Some(10),
                }),
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::MaxOutputTokens),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 0,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_content_filter() {
        let events = vec![responses::StreamEvent::Incomplete {
            response: responses::Response {
                usage: None,
                incomplete_details: Some(responses::IncompleteDetails {
                    reason: Some(responses::IncompleteReason::ContentFilter),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_events(events);
        assert!(matches!(
            mapped.last().unwrap(),
            LanguageModelCompletionEvent::Stop(StopReason::Refusal)
        ));
    }

    #[test]
    fn responses_stream_completed_no_duplicate_after_tool_use() {
        let events = vec![
            responses::StreamEvent::OutputItemDone {
                output_index: 0,
                sequence_number: None,
                item: responses::ResponseOutputItem::FunctionCall {
                    id: Some("fn_1".into()),
                    call_id: "call_1".into(),
                    name: "do_it".into(),
                    arguments: "{}".into(),
                    status: None,
                },
            },
            responses::StreamEvent::Completed {
                response: responses::Response::default(),
            },
        ];

        let mapped = map_events(events);

        let mut stop_count = 0usize;
        let mut saw_tool_use_stop = false;
        for event in mapped {
            if let LanguageModelCompletionEvent::Stop(reason) = event {
                stop_count += 1;
                if matches!(reason, StopReason::ToolUse) {
                    saw_tool_use_stop = true;
                }
            }
        }
        assert_eq!(stop_count, 1, "should emit exactly one Stop event");
        assert!(saw_tool_use_stop, "Stop reason should be ToolUse");
    }

    #[test]
    fn responses_stream_failed_maps_http_response_error() {
        let events = vec![responses::StreamEvent::Failed {
            response: responses::Response {
                error: Some(responses::ResponseError {
                    code: "429".into(),
                    message: "too many requests".into(),
                }),
                ..Default::default()
            },
        }];

        let mapped_results = futures::executor::block_on(async {
            CopilotResponsesEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
        });

        assert_eq!(mapped_results.len(), 1);
        match &mapped_results[0] {
            Err(LanguageModelCompletionError::HttpResponseError {
                status_code,
                message,
                ..
            }) => {
                assert_eq!(*status_code, http_client::StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(message, "too many requests");
            }
            other => panic!("expected HttpResponseError, got {:?}", other),
        }
    }
}
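
/// Configuration UI for Copilot Chat: shows the authorization state and
/// offers sign-in/sign-out actions based on the current [`Copilot`] status.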
struct ConfigurationView {
    copilot_status: Option<copilot::Status>,
    state: Entity<State>,
    _subscription: Option<Subscription>,
}

impl ConfigurationView {
    pub fn new(state: Entity<State>, cx: &mut Context<Self>) -> Self {
        let copilot = Copilot::global(cx);

        Self {
            copilot_status: copilot.as_ref().map(|copilot| copilot.read(cx).status()),
            state,
            _subscription: copilot.as_ref().map(|copilot| {
                cx.observe(copilot, |this, model, cx| {
                    this.copilot_status = Some(model.read(cx).status());
                    cx.notify();
                })
            }),
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        if self.state.read(cx).is_authenticated(cx) {
            h_flex()
                .mt_1()
                .p_1()
                .justify_between()
                .rounded_md()
                .border_1()
                .border_color(cx.theme().colors().border)
                .bg(cx.theme().colors().background)
                .child(
                    h_flex()
                        .gap_1()
                        .child(Icon::new(IconName::Check).color(Color::Success))
                        .child(Label::new("Authorized")),
                )
                .child(
                    Button::new("sign_out", "Sign Out")
                        .label_size(LabelSize::Small)
                        .on_click(|_, window, cx| {
                            window.dispatch_action(copilot::SignOut.boxed_clone(), cx);
                        }),
                )
        } else {
            let loading_icon = Icon::new(IconName::ArrowCircle).with_rotate_animation(4);

            const ERROR_LABEL: &str = "Copilot Chat requires an active GitHub Copilot subscription. Please ensure Copilot is configured and try again, or use a different Assistant provider.";

            match &self.copilot_status {
                Some(status) => match status {
                    Status::Starting { task: _ } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Starting Copilot…")),
                    Status::SigningIn { prompt: _ }
                    | Status::SignedOut {
                        awaiting_signing_in: true,
                    } => h_flex()
                        .gap_2()
                        .child(loading_icon)
                        .child(Label::new("Signing into Copilot…")),
                    Status::Error(_) => {
                        const LABEL: &str = "Copilot had issues starting. Please try restarting it. If the issue persists, try reinstalling Copilot.";
                        v_flex()
                            .gap_6()
                            .child(Label::new(LABEL))
                            .child(svg().size_8().path(IconName::CopilotError.path()))
                    }
                    _ => {
                        const LABEL: &str = "To use Zed's agent with GitHub Copilot, you need to be logged in to GitHub. Note that your GitHub account must have an active Copilot Chat subscription.";

                        v_flex().gap_2().child(Label::new(LABEL)).child(
                            Button::new("sign_in", "Sign in to use GitHub Copilot")
                                .full_width()
                                .style(ButtonStyle::Outlined)
                                .icon_color(Color::Muted)
                                .icon(IconName::Github)
                                .icon_position(IconPosition::Start)
                                .icon_size(IconSize::Small)
                                .on_click(|_, window, cx| copilot::initiate_sign_in(window, cx)),
                        )
                    }
                },
                None => v_flex().gap_6().child(Label::new(ERROR_LABEL)),
            }
        }
    }
}