// copilot_chat.rs

  1use std::path::PathBuf;
  2use std::sync::Arc;
  3use std::sync::OnceLock;
  4
  5use anyhow::Context as _;
  6use anyhow::{Result, anyhow};
  7use chrono::DateTime;
  8use collections::HashSet;
  9use fs::Fs;
 10use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::BoxStream};
 11use gpui::WeakEntity;
 12use gpui::{App, AsyncApp, Global, prelude::*};
 13use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest};
 14use itertools::Itertools;
 15use paths::home_dir;
 16use serde::{Deserialize, Serialize};
 17use settings::watch_config_dir;
 18
/// Environment variable that can supply the Copilot OAuth token directly,
/// bypassing the on-disk `hosts.json`/`apps.json` config files.
pub const COPILOT_OAUTH_ENV_VAR: &str = "GH_COPILOT_TOKEN";
 20
/// Endpoint configuration for Copilot Chat. Defaults to empty URLs;
/// populated via `CopilotChat::set_settings`.
#[derive(Default, Clone, Debug, PartialEq)]
pub struct CopilotChatSettings {
    /// Chat completions endpoint.
    pub api_url: Arc<str>,
    /// Endpoint that exchanges the OAuth token for a short-lived API token.
    pub auth_url: Arc<str>,
    /// Endpoint listing the models available to this account.
    pub models_url: Arc<str>,
}
 27
// Copilot's base model; defined by Microsoft in premium requests table
// This will be moved to the front of the Copilot model list, and will be used for
// 'fast' requests (e.g. title generation)
// https://docs.github.com/en/copilot/managing-copilot/monitoring-usage-and-entitlements/about-premium-requests
const DEFAULT_MODEL_ID: &str = "gpt-4.1";
 33
/// Chat participant role, serialized lowercase ("user"/"assistant"/"system")
/// in the OpenAI-compatible wire format.
#[derive(Clone, Copy, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    User,
    Assistant,
    System,
}
 41
/// Wire shape of the models-listing response; individual entries that fail to
/// deserialize are skipped (and logged) rather than failing the whole list.
#[derive(Deserialize)]
struct ModelSchema {
    #[serde(deserialize_with = "deserialize_models_skip_errors")]
    data: Vec<Model>,
}
 47
 48fn deserialize_models_skip_errors<'de, D>(deserializer: D) -> Result<Vec<Model>, D::Error>
 49where
 50    D: serde::Deserializer<'de>,
 51{
 52    let raw_values = Vec::<serde_json::Value>::deserialize(deserializer)?;
 53    let models = raw_values
 54        .into_iter()
 55        .filter_map(|value| match serde_json::from_value::<Model>(value) {
 56            Ok(model) => Some(model),
 57            Err(err) => {
 58                log::warn!("GitHub Copilot Chat model failed to deserialize: {:?}", err);
 59                None
 60            }
 61        })
 62        .collect();
 63
 64    Ok(models)
 65}
 66
/// A single model entry from the Copilot models endpoint.
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct Model {
    capabilities: ModelCapabilities,
    // API identifier, e.g. "gpt-4.1".
    id: String,
    // Human-readable display name.
    name: String,
    // Present only for models that must be enabled in the GitHub dashboard.
    policy: Option<ModelPolicy>,
    vendor: ModelVendor,
    // Whether the model is exposed in GitHub's model picker.
    model_picker_enabled: bool,
}
 76
/// Capability metadata reported per model.
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
struct ModelCapabilities {
    // Model family (e.g. "gpt-4"); used to dedupe near-identical variants.
    family: String,
    #[serde(default)]
    limits: ModelLimits,
    supports: ModelSupportedFeatures,
}
 84
/// Token limits for a model; any field absent from the response defaults to 0.
#[derive(Default, Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
struct ModelLimits {
    #[serde(default)]
    max_context_window_tokens: usize,
    #[serde(default)]
    max_output_tokens: usize,
    // Used as the model's effective max token count (see `Model::max_token_count`).
    #[serde(default)]
    max_prompt_tokens: usize,
}
 94
/// Per-model access policy; a model is usable when `state == "enabled"`
/// (checked in `get_models`).
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
struct ModelPolicy {
    state: String,
}
 99
/// Feature flags for a model; flags missing from the response default to false.
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
struct ModelSupportedFeatures {
    #[serde(default)]
    streaming: bool,
    #[serde(default)]
    tool_calls: bool,
    #[serde(default)]
    parallel_tool_calls: bool,
    #[serde(default)]
    vision: bool,
}
111
/// Upstream provider serving a Copilot model.
#[derive(Clone, Copy, Serialize, Deserialize, Debug, Eq, PartialEq)]
pub enum ModelVendor {
    // Azure OpenAI should have no functional difference from OpenAI in Copilot Chat
    #[serde(alias = "Azure OpenAI")]
    OpenAI,
    Google,
    Anthropic,
}
120
/// One part of a multipart chat message, tagged by `type` on the wire
/// ("text" or "image_url").
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)]
#[serde(tag = "type")]
pub enum ChatMessagePart {
    #[serde(rename = "text")]
    Text { text: String },
    #[serde(rename = "image_url")]
    Image { image_url: ImageUrl },
}
129
/// Image reference carried by an `image_url` message part.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)]
pub struct ImageUrl {
    pub url: String,
}
134
135impl Model {
136    pub fn uses_streaming(&self) -> bool {
137        self.capabilities.supports.streaming
138    }
139
140    pub fn id(&self) -> &str {
141        self.id.as_str()
142    }
143
144    pub fn display_name(&self) -> &str {
145        self.name.as_str()
146    }
147
148    pub fn max_token_count(&self) -> usize {
149        self.capabilities.limits.max_prompt_tokens
150    }
151
152    pub fn supports_tools(&self) -> bool {
153        self.capabilities.supports.tool_calls
154    }
155
156    pub fn vendor(&self) -> ModelVendor {
157        self.vendor
158    }
159
160    pub fn supports_vision(&self) -> bool {
161        self.capabilities.supports.vision
162    }
163
164    pub fn supports_parallel_tool_calls(&self) -> bool {
165        self.capabilities.supports.parallel_tool_calls
166    }
167}
168
/// Request body for the Copilot Chat completions endpoint
/// (OpenAI-compatible chat-completions shape).
#[derive(Serialize, Deserialize)]
pub struct Request {
    // NOTE(review): presumably flags a user-initiated chat request to the
    // Copilot backend — confirm against the Copilot API docs.
    pub intent: bool,
    /// Number of completion choices to request.
    pub n: usize,
    /// When true, the response arrives as server-sent events.
    pub stream: bool,
    pub temperature: f32,
    /// Model id, e.g. "gpt-4.1" (see `DEFAULT_MODEL_ID`).
    pub model: String,
    pub messages: Vec<ChatMessage>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub tools: Vec<Tool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoice>,
}
182
/// Definition of a callable tool function exposed to the model.
#[derive(Serialize, Deserialize)]
pub struct Function {
    pub name: String,
    pub description: String,
    /// Parameter specification (arbitrary JSON; typically a JSON Schema object).
    pub parameters: serde_json::Value,
}
189
/// A tool the model may invoke; tagged by `type`, currently only "function".
#[derive(Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Tool {
    Function { function: Function },
}
195
/// Tool-selection strategy, serialized lowercase ("auto"/"any"/"none").
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoice {
    Auto,
    Any,
    None,
}
203
/// A chat message, tagged by `role` on the wire
/// ("assistant"/"user"/"system"/"tool").
#[derive(Serialize, Deserialize, Debug)]
#[serde(tag = "role", rename_all = "lowercase")]
pub enum ChatMessage {
    Assistant {
        content: ChatMessageContent,
        // Omitted from JSON when empty.
        #[serde(default, skip_serializing_if = "Vec::is_empty")]
        tool_calls: Vec<ToolCall>,
    },
    User {
        content: ChatMessageContent,
    },
    // System prompts carry plain text only (no multipart content).
    System {
        content: String,
    },
    // Result of a prior tool call, correlated via `tool_call_id`.
    Tool {
        content: ChatMessageContent,
        tool_call_id: String,
    },
}
223
/// Message content: either a plain string or a list of typed parts.
/// Untagged — serde picks whichever variant matches the JSON shape.
#[derive(Debug, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ChatMessageContent {
    Plain(String),
    Multipart(Vec<ChatMessagePart>),
}
230
231impl ChatMessageContent {
232    pub fn empty() -> Self {
233        ChatMessageContent::Multipart(vec![])
234    }
235}
236
237impl From<Vec<ChatMessagePart>> for ChatMessageContent {
238    fn from(mut parts: Vec<ChatMessagePart>) -> Self {
239        if let [ChatMessagePart::Text { text }] = parts.as_mut_slice() {
240            ChatMessageContent::Plain(std::mem::take(text))
241        } else {
242            ChatMessageContent::Multipart(parts)
243        }
244    }
245}
246
247impl From<String> for ChatMessageContent {
248    fn from(text: String) -> Self {
249        ChatMessageContent::Plain(text)
250    }
251}
252
/// A tool invocation requested by the model.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct ToolCall {
    pub id: String,
    /// Flattened into this object on the wire (contributes the `type` tag and
    /// payload fields directly).
    #[serde(flatten)]
    pub content: ToolCallContent,
}
259
/// Payload of a tool call, tagged by `type`; only "function" today.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ToolCallContent {
    Function { function: FunctionContent },
}
265
/// A complete function invocation: name plus its arguments.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct FunctionContent {
    pub name: String,
    /// Raw arguments string as sent by the API (usually JSON-encoded).
    pub arguments: String,
}
271
/// One completion response event (a full response, or one SSE chunk).
// NOTE(review): `tag = "type"` on a *struct* only affects how a tag field is
// emitted/consumed; it looks copied from an enum and may be unnecessary here —
// confirm before removing.
#[derive(Deserialize, Debug)]
#[serde(tag = "type", rename_all = "snake_case")]
pub struct ResponseEvent {
    // Empty-choice keep-alive events are filtered out by `stream_completion`.
    pub choices: Vec<ResponseChoice>,
    pub id: String,
}
278
/// One choice within a response event.
#[derive(Debug, Deserialize)]
pub struct ResponseChoice {
    pub index: usize,
    /// Termination reason, when provided by the API.
    pub finish_reason: Option<String>,
    // NOTE(review): presumably mirrors OpenAI's split — `delta` populated for
    // streaming chunks, `message` for non-streaming responses; confirm.
    pub delta: Option<ResponseDelta>,
    pub message: Option<ResponseDelta>,
}
286
/// Message payload of a choice; used for both `delta` and `message`.
#[derive(Debug, Deserialize)]
pub struct ResponseDelta {
    pub content: Option<String>,
    pub role: Option<Role>,
    // Defaults to empty when absent from the JSON.
    #[serde(default)]
    pub tool_calls: Vec<ToolCallChunk>,
}
294
/// Partial tool-call data from a streaming response; `index` identifies which
/// in-progress call the fragment belongs to.
#[derive(Deserialize, Debug, Eq, PartialEq)]
pub struct ToolCallChunk {
    pub index: usize,
    pub id: Option<String>,
    pub function: Option<FunctionChunk>,
}
301
/// Partial function-call data: name and/or an arguments fragment.
#[derive(Deserialize, Debug, Eq, PartialEq)]
pub struct FunctionChunk {
    pub name: Option<String>,
    pub arguments: Option<String>,
}
307
/// Wire format of the OAuth-token → API-token exchange response.
#[derive(Deserialize)]
struct ApiTokenResponse {
    token: String,
    // Expiry as a Unix timestamp in seconds (see `TryFrom<ApiTokenResponse>`).
    expires_at: i64,
}
313
/// A short-lived Copilot API key together with its expiry time.
#[derive(Clone)]
struct ApiToken {
    api_key: String,
    expires_at: DateTime<chrono::Utc>,
}
319
320impl ApiToken {
321    pub fn remaining_seconds(&self) -> i64 {
322        self.expires_at
323            .timestamp()
324            .saturating_sub(chrono::Utc::now().timestamp())
325    }
326}
327
328impl TryFrom<ApiTokenResponse> for ApiToken {
329    type Error = anyhow::Error;
330
331    fn try_from(response: ApiTokenResponse) -> Result<Self, Self::Error> {
332        let expires_at =
333            DateTime::from_timestamp(response.expires_at, 0).context("invalid expires_at")?;
334
335        Ok(Self {
336            api_key: response.token,
337            expires_at,
338        })
339    }
340}
341
/// Newtype wrapper registering the `CopilotChat` entity as a gpui global so it
/// can be fetched from any `App` (see `CopilotChat::global`).
struct GlobalCopilotChat(gpui::Entity<CopilotChat>);

impl Global for GlobalCopilotChat {}
345
/// App-global Copilot Chat state: credentials, endpoint settings, and the
/// cached model list.
pub struct CopilotChat {
    // Long-lived GitHub OAuth token, read from the config files or from the
    // `GH_COPILOT_TOKEN` environment variable.
    oauth_token: Option<String>,
    // Short-lived API token exchanged from the OAuth token; refreshed when
    // close to expiry (see `stream_completion`).
    api_token: Option<ApiToken>,
    settings: CopilotChatSettings,
    // `None` until the first successful fetch from the models endpoint.
    models: Option<Vec<Model>>,
    client: Arc<dyn HttpClient>,
}
353
/// Creates the global `CopilotChat` entity. Call once at startup; afterwards
/// the entity is reachable via `CopilotChat::global`.
pub fn init(fs: Arc<dyn Fs>, client: Arc<dyn HttpClient>, cx: &mut App) {
    let copilot_chat = cx.new(|cx| CopilotChat::new(fs, client, cx));
    cx.set_global(GlobalCopilotChat(copilot_chat));
}
358
359pub fn copilot_chat_config_dir() -> &'static PathBuf {
360    static COPILOT_CHAT_CONFIG_DIR: OnceLock<PathBuf> = OnceLock::new();
361
362    COPILOT_CHAT_CONFIG_DIR.get_or_init(|| {
363        let config_dir = if cfg!(target_os = "windows") {
364            dirs::data_local_dir().expect("failed to determine LocalAppData directory")
365        } else {
366            std::env::var("XDG_CONFIG_HOME")
367                .map(PathBuf::from)
368                .unwrap_or_else(|_| home_dir().join(".config"))
369        };
370
371        config_dir.join("github-copilot")
372    })
373}
374
375fn copilot_chat_config_paths() -> [PathBuf; 2] {
376    let base_dir = copilot_chat_config_dir();
377    [base_dir.join("hosts.json"), base_dir.join("apps.json")]
378}
379
impl CopilotChat {
    /// Returns the app-wide `CopilotChat` entity, if `init` has been called.
    pub fn global(cx: &App) -> Option<gpui::Entity<Self>> {
        cx.try_global::<GlobalCopilotChat>()
            .map(|model| model.0.clone())
    }

    /// Creates the chat state and spawns a background watcher over the
    /// Copilot config directory, so OAuth-token changes on disk are picked up
    /// while the app runs. The initial token, if any, comes from the
    /// `GH_COPILOT_TOKEN` environment variable.
    fn new(fs: Arc<dyn Fs>, client: Arc<dyn HttpClient>, cx: &mut Context<Self>) -> Self {
        let config_paths: HashSet<PathBuf> = copilot_chat_config_paths().into_iter().collect();
        let dir_path = copilot_chat_config_dir();
        let settings = CopilotChatSettings::default();
        // Each time hosts.json/apps.json changes, re-extract the OAuth token
        // and, if one is present, refresh the model list.
        cx.spawn(async move |this, cx| {
            let mut parent_watch_rx = watch_config_dir(
                cx.background_executor(),
                fs.clone(),
                dir_path.clone(),
                config_paths,
            );
            while let Some(contents) = parent_watch_rx.next().await {
                let oauth_token = extract_oauth_token(contents);

                this.update(cx, |this, cx| {
                    this.oauth_token = oauth_token.clone();
                    cx.notify();
                })?;

                if oauth_token.is_some() {
                    Self::update_models(&this, cx).await?;
                }
            }
            anyhow::Ok(())
        })
        .detach_and_log_err(cx);

        let this = Self {
            // Environment override takes effect immediately; the watcher above
            // will overwrite it when the config files change.
            oauth_token: std::env::var(COPILOT_OAUTH_ENV_VAR).ok(),
            api_token: None,
            models: None,
            settings,
            client,
        };
        if this.oauth_token.is_some() {
            cx.spawn(async move |this, mut cx| Self::update_models(&this, &mut cx).await)
                .detach_and_log_err(cx);
        }

        this
    }

    /// Exchanges the OAuth token for an API token, stores it, then fetches
    /// and stores the model list. Errors if no OAuth token is available.
    async fn update_models(this: &WeakEntity<Self>, cx: &mut AsyncApp) -> Result<()> {
        let (oauth_token, client, auth_url) = this.read_with(cx, |this, _| {
            (
                this.oauth_token.clone(),
                this.client.clone(),
                this.settings.auth_url.clone(),
            )
        })?;
        let api_token = request_api_token(
            &oauth_token.ok_or_else(|| {
                anyhow!("OAuth token is missing while updating Copilot Chat models")
            })?,
            auth_url,
            client.clone(),
        )
        .await?;

        // Store the fresh API token before the (potentially slow) models fetch
        // so other callers can use it in the meantime.
        let models_url = this.update(cx, |this, cx| {
            this.api_token = Some(api_token.clone());
            cx.notify();
            this.settings.models_url.clone()
        })?;
        let models = get_models(models_url, api_token.api_key, client.clone()).await?;

        this.update(cx, |this, cx| {
            this.models = Some(models);
            cx.notify();
        })?;
        anyhow::Ok(())
    }

    /// True when an OAuth token is present (env var or config file).
    pub fn is_authenticated(&self) -> bool {
        self.oauth_token.is_some()
    }

    /// The cached model list, or `None` before the first successful fetch.
    pub fn models(&self) -> Option<&[Model]> {
        self.models.as_deref()
    }

    /// Sends a completion request via the global instance, refreshing the API
    /// token first when it has less than five minutes of validity left.
    pub async fn stream_completion(
        request: Request,
        mut cx: AsyncApp,
    ) -> Result<BoxStream<'static, Result<ResponseEvent>>> {
        let this = cx
            .update(|cx| Self::global(cx))
            .ok()
            .flatten()
            .context("Copilot chat is not enabled")?;

        let (oauth_token, api_token, client, api_url, auth_url) =
            this.read_with(&cx, |this, _| {
                (
                    this.oauth_token.clone(),
                    this.api_token.clone(),
                    this.client.clone(),
                    this.settings.api_url.clone(),
                    this.settings.auth_url.clone(),
                )
            })?;

        let oauth_token = oauth_token.context("No OAuth token available")?;

        // Reuse the cached API token unless it expires within 5 minutes.
        let token = match api_token {
            Some(api_token) if api_token.remaining_seconds() > 5 * 60 => api_token.clone(),
            _ => {
                let token = request_api_token(&oauth_token, auth_url, client.clone()).await?;
                this.update(&mut cx, |this, cx| {
                    this.api_token = Some(token.clone());
                    cx.notify();
                })?;
                token
            }
        };

        stream_completion(client.clone(), token.api_key, api_url, request).await
    }

    /// Replaces the endpoint settings; if they changed, kicks off a model-list
    /// refresh in the background (errors are silently dropped via `detach`).
    pub fn set_settings(&mut self, settings: CopilotChatSettings, cx: &mut Context<Self>) {
        let same_settings = self.settings == settings;
        self.settings = settings;
        if !same_settings {
            cx.spawn(async move |this, cx| {
                Self::update_models(&this, cx).await?;
                Ok::<_, anyhow::Error>(())
            })
            .detach();
        }
    }
}
517
518async fn get_models(
519    models_url: Arc<str>,
520    api_token: String,
521    client: Arc<dyn HttpClient>,
522) -> Result<Vec<Model>> {
523    let all_models = request_models(models_url, api_token, client).await?;
524
525    let mut models: Vec<Model> = all_models
526        .into_iter()
527        .filter(|model| {
528            // Ensure user has access to the model; Policy is present only for models that must be
529            // enabled in the GitHub dashboard
530            model.model_picker_enabled
531                && model
532                    .policy
533                    .as_ref()
534                    .is_none_or(|policy| policy.state == "enabled")
535        })
536        // The first model from the API response, in any given family, appear to be the non-tagged
537        // models, which are likely the best choice (e.g. gpt-4o rather than gpt-4o-2024-11-20)
538        .dedup_by(|a, b| a.capabilities.family == b.capabilities.family)
539        .collect();
540
541    if let Some(default_model_position) =
542        models.iter().position(|model| model.id == DEFAULT_MODEL_ID)
543    {
544        let default_model = models.remove(default_model_position);
545        models.insert(0, default_model);
546    }
547
548    Ok(models)
549}
550
551async fn request_models(
552    models_url: Arc<str>,
553    api_token: String,
554    client: Arc<dyn HttpClient>,
555) -> Result<Vec<Model>> {
556    let request_builder = HttpRequest::builder()
557        .method(Method::GET)
558        .uri(models_url.as_ref())
559        .header("Authorization", format!("Bearer {}", api_token))
560        .header("Content-Type", "application/json")
561        .header("Copilot-Integration-Id", "vscode-chat");
562
563    let request = request_builder.body(AsyncBody::empty())?;
564
565    let mut response = client.send(request).await?;
566
567    anyhow::ensure!(
568        response.status().is_success(),
569        "Failed to request models: {}",
570        response.status()
571    );
572    let mut body = Vec::new();
573    response.body_mut().read_to_end(&mut body).await?;
574
575    let body_str = std::str::from_utf8(&body)?;
576
577    let models = serde_json::from_str::<ModelSchema>(body_str)?.data;
578
579    Ok(models)
580}
581
582async fn request_api_token(
583    oauth_token: &str,
584    auth_url: Arc<str>,
585    client: Arc<dyn HttpClient>,
586) -> Result<ApiToken> {
587    let request_builder = HttpRequest::builder()
588        .method(Method::GET)
589        .uri(auth_url.as_ref())
590        .header("Authorization", format!("token {}", oauth_token))
591        .header("Accept", "application/json");
592
593    let request = request_builder.body(AsyncBody::empty())?;
594
595    let mut response = client.send(request).await?;
596
597    if response.status().is_success() {
598        let mut body = Vec::new();
599        response.body_mut().read_to_end(&mut body).await?;
600
601        let body_str = std::str::from_utf8(&body)?;
602
603        let parsed: ApiTokenResponse = serde_json::from_str(body_str)?;
604        ApiToken::try_from(parsed)
605    } else {
606        let mut body = Vec::new();
607        response.body_mut().read_to_end(&mut body).await?;
608
609        let body_str = std::str::from_utf8(&body)?;
610        anyhow::bail!("Failed to request API token: {body_str}");
611    }
612}
613
614fn extract_oauth_token(contents: String) -> Option<String> {
615    serde_json::from_str::<serde_json::Value>(&contents)
616        .map(|v| {
617            v.as_object().and_then(|obj| {
618                obj.iter().find_map(|(key, value)| {
619                    if key.starts_with("github.com") {
620                        value["oauth_token"].as_str().map(|v| v.to_string())
621                    } else {
622                        None
623                    }
624                })
625            })
626        })
627        .ok()
628        .flatten()
629}
630
/// Sends a chat completion request to the Copilot API and returns a stream of
/// response events.
///
/// When `request.stream` is true the SSE body is parsed line by line;
/// otherwise the whole body is parsed once and wrapped in a one-item stream.
async fn stream_completion(
    client: Arc<dyn HttpClient>,
    api_key: String,
    completion_url: Arc<str>,
    request: Request,
) -> Result<BoxStream<'static, Result<ResponseEvent>>> {
    // The vision header is derived from the *last* message only.
    // NOTE(review): images in earlier messages won't set this header —
    // confirm that's the intended contract with the Copilot API.
    let is_vision_request = request.messages.last().map_or(false, |message| match message {
        ChatMessage::User { content }
        | ChatMessage::Assistant { content, .. }
        | ChatMessage::Tool { content, .. } => {
            matches!(content, ChatMessageContent::Multipart(parts) if parts.iter().any(|part| matches!(part, ChatMessagePart::Image { .. })))
        }
        // System messages carry plain text only.
        _ => false,
    });

    let request_builder = HttpRequest::builder()
        .method(Method::POST)
        .uri(completion_url.as_ref())
        .header(
            "Editor-Version",
            format!(
                "Zed/{}",
                option_env!("CARGO_PKG_VERSION").unwrap_or("unknown")
            ),
        )
        .header("Authorization", format!("Bearer {}", api_key))
        .header("Content-Type", "application/json")
        .header("Copilot-Integration-Id", "vscode-chat")
        .header("Copilot-Vision-Request", is_vision_request.to_string());

    // Captured before `request` is consumed by serialization below.
    let is_streaming = request.stream;

    let json = serde_json::to_string(&request)?;
    let request = request_builder.body(AsyncBody::from(json))?;
    let mut response = client.send(request).await?;

    if !response.status().is_success() {
        // Include the body in the error; it usually carries the API's
        // explanation for the failure.
        let mut body = Vec::new();
        response.body_mut().read_to_end(&mut body).await?;
        let body_str = std::str::from_utf8(&body)?;
        anyhow::bail!(
            "Failed to connect to API: {} {}",
            response.status(),
            body_str
        );
    }

    if is_streaming {
        let reader = BufReader::new(response.into_body());
        Ok(reader
            .lines()
            .filter_map(|line| async move {
                match line {
                    Ok(line) => {
                        // SSE data lines are prefixed with "data: "; anything
                        // else (comments, blank keep-alive lines) is skipped.
                        let line = line.strip_prefix("data: ")?;
                        // "[DONE]" is the SSE end-of-stream sentinel.
                        if line.starts_with("[DONE]") {
                            return None;
                        }

                        match serde_json::from_str::<ResponseEvent>(line) {
                            Ok(response) => {
                                // Drop events with no choices (keep-alives).
                                if response.choices.is_empty() {
                                    None
                                } else {
                                    Some(Ok(response))
                                }
                            }
                            Err(error) => Some(Err(anyhow!(error))),
                        }
                    }
                    Err(error) => Some(Err(anyhow!(error))),
                }
            })
            .boxed())
    } else {
        // Non-streaming: parse the entire body as a single event.
        let mut body = Vec::new();
        response.body_mut().read_to_end(&mut body).await?;
        let body_str = std::str::from_utf8(&body)?;
        let response: ResponseEvent = serde_json::from_str(body_str)?;

        Ok(futures::stream::once(async move { Ok(response) }).boxed())
    }
}
714
#[cfg(test)]
mod tests {
    use super::*;

    /// The models endpoint can return entries in shapes we don't understand;
    /// `ModelSchema` should skip those instead of failing wholesale.
    #[test]
    fn test_resilient_model_schema_deserialize() {
        let json = r#"{
              "data": [
                {
                  "capabilities": {
                    "family": "gpt-4",
                    "limits": {
                      "max_context_window_tokens": 32768,
                      "max_output_tokens": 4096,
                      "max_prompt_tokens": 32768
                    },
                    "object": "model_capabilities",
                    "supports": { "streaming": true, "tool_calls": true },
                    "tokenizer": "cl100k_base",
                    "type": "chat"
                  },
                  "id": "gpt-4",
                  "model_picker_enabled": false,
                  "name": "GPT 4",
                  "object": "model",
                  "preview": false,
                  "vendor": "Azure OpenAI",
                  "version": "gpt-4-0613"
                },
                {
                    "some-unknown-field": 123
                },
                {
                  "capabilities": {
                    "family": "claude-3.7-sonnet",
                    "limits": {
                      "max_context_window_tokens": 200000,
                      "max_output_tokens": 16384,
                      "max_prompt_tokens": 90000,
                      "vision": {
                        "max_prompt_image_size": 3145728,
                        "max_prompt_images": 1,
                        "supported_media_types": ["image/jpeg", "image/png", "image/webp"]
                      }
                    },
                    "object": "model_capabilities",
                    "supports": {
                      "parallel_tool_calls": true,
                      "streaming": true,
                      "tool_calls": true,
                      "vision": true
                    },
                    "tokenizer": "o200k_base",
                    "type": "chat"
                  },
                  "id": "claude-3.7-sonnet",
                  "model_picker_enabled": true,
                  "name": "Claude 3.7 Sonnet",
                  "object": "model",
                  "policy": {
                    "state": "enabled",
                    "terms": "Enable access to the latest Claude 3.7 Sonnet model from Anthropic. [Learn more about how GitHub Copilot serves Claude 3.7 Sonnet](https://docs.github.com/copilot/using-github-copilot/using-claude-sonnet-in-github-copilot)."
                  },
                  "preview": false,
                  "vendor": "Anthropic",
                  "version": "claude-3.7-sonnet"
                }
              ],
              "object": "list"
            }"#;

        // `json` is already a `&str`; the previous `&json` was a needless
        // double reference (clippy::needless_borrow).
        let schema: ModelSchema = serde_json::from_str(json).unwrap();

        // The malformed middle entry is dropped; the two valid ones survive.
        assert_eq!(schema.data.len(), 2);
        assert_eq!(schema.data[0].id, "gpt-4");
        assert_eq!(schema.data[1].id, "claude-3.7-sonnet");
    }
}
792}