Add GPT-4o as possible model (#11764)

Created by Toon Willems

Resolves: #11766

Release Notes:

- Add GPT-4o support (see: https://openai.com/index/hello-gpt-4o/).
GPT-4o is faster and more capable than GPT-4 Turbo, at half the price.

Change summary

assets/settings/default.json               | 3 ++-
crates/assistant/src/assistant_panel.rs    | 3 ++-
crates/assistant/src/assistant_settings.rs | 4 ++--
crates/open_ai/src/open_ai.rs              | 8 +++++++-
4 files changed, 13 insertions(+), 5 deletions(-)

Detailed changes

assets/settings/default.json

@@ -300,7 +300,8 @@
       // 1. "gpt-3.5-turbo"
       // 2. "gpt-4"
       // 3. "gpt-4-turbo-preview"
-      "default_model": "gpt-4-turbo-preview"
+      // 4. "gpt-4o"
+      "default_model": "gpt-4o"
     }
   },
   // Whether the screen sharing icon is shown in the os status bar.

crates/assistant/src/assistant_panel.rs

@@ -771,7 +771,8 @@ impl AssistantPanel {
             LanguageModel::OpenAi(model) => LanguageModel::OpenAi(match &model {
                 open_ai::Model::ThreePointFiveTurbo => open_ai::Model::Four,
                 open_ai::Model::Four => open_ai::Model::FourTurbo,
-                open_ai::Model::FourTurbo => open_ai::Model::ThreePointFiveTurbo,
+                open_ai::Model::FourTurbo => open_ai::Model::FourOmni,
+                open_ai::Model::FourOmni => open_ai::Model::ThreePointFiveTurbo,
             }),
             LanguageModel::ZedDotDev(model) => LanguageModel::ZedDotDev(match &model {
                 ZedDotDevModel::Gpt3Point5Turbo => ZedDotDevModel::Gpt4,

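For reference, here is a minimal standalone sketch of the new cycle order used when stepping through models in the panel. It is illustrative only; the enum and the `cycle` helper are simplified stand-ins for the real `open_ai::Model` match above, not the panel's actual code.

```rust
// Simplified stand-in for open_ai::Model (illustrative only).
#[derive(Debug, Clone, Copy, PartialEq)]
enum Model {
    ThreePointFiveTurbo,
    Four,
    FourTurbo,
    FourOmni,
}

// Mirrors the match in AssistantPanel: each model advances to the next one.
fn cycle(model: Model) -> Model {
    match model {
        Model::ThreePointFiveTurbo => Model::Four,
        Model::Four => Model::FourTurbo,
        // GPT-4o is inserted into the cycle after GPT-4 Turbo...
        Model::FourTurbo => Model::FourOmni,
        // ...and cycling past it wraps back around to GPT-3.5 Turbo.
        Model::FourOmni => Model::ThreePointFiveTurbo,
    }
}

fn main() {
    let mut m = Model::ThreePointFiveTurbo;
    for _ in 0..4 {
        m = cycle(m);
    }
    // A full cycle now visits all four models and returns to the start.
    assert_eq!(m, Model::ThreePointFiveTurbo);
}
```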
crates/assistant/src/assistant_settings.rs

@@ -414,7 +414,7 @@ mod tests {
         assert_eq!(
             AssistantSettings::get_global(cx).provider,
             AssistantProvider::OpenAi {
-                default_model: OpenAiModel::FourTurbo,
+                default_model: OpenAiModel::FourOmni,
                 api_url: open_ai_url(),
                 low_speed_timeout_in_seconds: None,
             }
@@ -436,7 +436,7 @@ mod tests {
         assert_eq!(
             AssistantSettings::get_global(cx).provider,
             AssistantProvider::OpenAi {
-                default_model: OpenAiModel::FourTurbo,
+                default_model: OpenAiModel::FourOmni,
                 api_url: "test-url".into(),
                 low_speed_timeout_in_seconds: None,
             }

crates/open_ai/src/open_ai.rs

@@ -51,8 +51,10 @@ pub enum Model {
     #[serde(rename = "gpt-4", alias = "gpt-4-0613")]
     Four,
     #[serde(rename = "gpt-4-turbo-preview", alias = "gpt-4-1106-preview")]
-    #[default]
     FourTurbo,
+    #[serde(rename = "gpt-4o", alias = "gpt-4o-2024-05-13")]
+    #[default]
+    FourOmni,
 }
 
 impl Model {
@@ -61,6 +63,7 @@ impl Model {
             "gpt-3.5-turbo" => Ok(Self::ThreePointFiveTurbo),
             "gpt-4" => Ok(Self::Four),
             "gpt-4-turbo-preview" => Ok(Self::FourTurbo),
+            "gpt-4o" => Ok(Self::FourOmni),
             _ => Err(anyhow!("invalid model id")),
         }
     }
@@ -70,6 +73,7 @@ impl Model {
             Self::ThreePointFiveTurbo => "gpt-3.5-turbo",
             Self::Four => "gpt-4",
             Self::FourTurbo => "gpt-4-turbo-preview",
+            Self::FourOmni => "gpt-4o",
         }
     }
 
@@ -78,6 +82,7 @@ impl Model {
             Self::ThreePointFiveTurbo => "gpt-3.5-turbo",
             Self::Four => "gpt-4",
             Self::FourTurbo => "gpt-4-turbo",
+            Self::FourOmni => "gpt-4o",
         }
     }
 
@@ -86,6 +91,7 @@ impl Model {
             Model::ThreePointFiveTurbo => 4096,
             Model::Four => 8192,
             Model::FourTurbo => 128000,
+            Model::FourOmni => 128000,
         }
     }
 }
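A minimal standalone sketch of the serde behavior the new variant picks up (requires the `serde` derive feature and `serde_json`): it mirrors the attributes in the hunk above but is an illustration, not the crate's code.

```rust
use serde::{Deserialize, Serialize};

// Mirrors the enum in crates/open_ai/src/open_ai.rs (illustrative copy).
#[derive(Debug, Default, PartialEq, Serialize, Deserialize)]
enum Model {
    #[serde(rename = "gpt-3.5-turbo")]
    ThreePointFiveTurbo,
    #[serde(rename = "gpt-4", alias = "gpt-4-0613")]
    Four,
    #[serde(rename = "gpt-4-turbo-preview", alias = "gpt-4-1106-preview")]
    FourTurbo,
    #[serde(rename = "gpt-4o", alias = "gpt-4o-2024-05-13")]
    #[default]
    FourOmni,
}

fn main() {
    // The canonical id round-trips through serde...
    let m: Model = serde_json::from_str("\"gpt-4o\"").unwrap();
    assert_eq!(m, Model::FourOmni);
    assert_eq!(serde_json::to_string(&Model::FourOmni).unwrap(), "\"gpt-4o\"");

    // ...and the dated snapshot alias deserializes to the same variant.
    let snapshot: Model = serde_json::from_str("\"gpt-4o-2024-05-13\"").unwrap();
    assert_eq!(snapshot, Model::FourOmni);

    // GPT-4o is now the default when no model is configured.
    assert_eq!(Model::default(), Model::FourOmni);
}
```

The `#[default]` attribute moving from `FourTurbo` to `FourOmni` is what changes the expected values in the assistant_settings.rs tests above.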