Add GPT-5.4 and GPT-5.4-pro BYOK models (#50858) (cherry-pick to preview) (#50896)

Created by zed-zippy[bot] and Richard Feldman

Cherry-pick of #50858 to preview

----
Add GPT-5.4 and GPT-5.4-pro as Bring Your Own Key model options for the
OpenAI provider.

**GPT-5.4** (`gpt-5.4`):
- 1,050,000 token context window, 128K max output
- Supports chat completions, images, parallel tool calls
- Default reasoning effort: none

**GPT-5.4-pro** (`gpt-5.4-pro`):
- 1,050,000 token context window, 128K max output
- Responses API only (no chat completions)
- Default reasoning effort: medium (supports medium/high/xhigh)

Also fixes context window sizes for GPT-5 mini and GPT-5 nano (272K →
400K) to match current OpenAI docs.

Closes AI-78

Release Notes:

- Added GPT-5.4 and GPT-5.4-pro as available models when using your own
OpenAI API key.

Co-authored-by: Richard Feldman <richard@zed.dev>

Change summary

crates/language_models/src/provider/open_ai.rs | 10 ++++--
crates/open_ai/src/open_ai.rs                  | 27 +++++++++++++++++--
2 files changed, 29 insertions(+), 8 deletions(-)

Detailed changes

crates/language_models/src/provider/open_ai.rs 🔗

@@ -310,6 +310,8 @@ impl LanguageModel for OpenAiLanguageModel {
             | Model::FivePointTwo
             | Model::FivePointTwoCodex
             | Model::FivePointThreeCodex
+            | Model::FivePointFour
+            | Model::FivePointFourPro
             | Model::O1
             | Model::O3 => true,
             Model::ThreePointFiveTurbo
@@ -1182,13 +1184,13 @@ pub fn count_open_ai_tokens(
             | Model::FiveCodex
             | Model::FiveMini
             | Model::FiveNano => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
-            // GPT-5.1, 5.2, 5.2-codex, and 5.3-codex don't have dedicated tiktoken support; use gpt-5 tokenizer
+            // GPT-5.1, 5.2, 5.2-codex, 5.3-codex, 5.4, and 5.4-pro don't have dedicated tiktoken support; use gpt-5 tokenizer
             Model::FivePointOne
             | Model::FivePointTwo
             | Model::FivePointTwoCodex
-            | Model::FivePointThreeCodex => {
-                tiktoken_rs::num_tokens_from_messages("gpt-5", &messages)
-            }
+            | Model::FivePointThreeCodex
+            | Model::FivePointFour
+            | Model::FivePointFourPro => tiktoken_rs::num_tokens_from_messages("gpt-5", &messages),
         }
         .map(|tokens| tokens as u64)
     })

crates/open_ai/src/open_ai.rs 🔗

@@ -90,6 +90,10 @@ pub enum Model {
     FivePointTwoCodex,
     #[serde(rename = "gpt-5.3-codex")]
     FivePointThreeCodex,
+    #[serde(rename = "gpt-5.4")]
+    FivePointFour,
+    #[serde(rename = "gpt-5.4-pro")]
+    FivePointFourPro,
     #[serde(rename = "custom")]
     Custom {
         name: String,
@@ -131,6 +135,8 @@ impl Model {
             "gpt-5.2" => Ok(Self::FivePointTwo),
             "gpt-5.2-codex" => Ok(Self::FivePointTwoCodex),
             "gpt-5.3-codex" => Ok(Self::FivePointThreeCodex),
+            "gpt-5.4" => Ok(Self::FivePointFour),
+            "gpt-5.4-pro" => Ok(Self::FivePointFourPro),
             invalid_id => anyhow::bail!("invalid model id '{invalid_id}'"),
         }
     }
@@ -153,6 +159,8 @@ impl Model {
             Self::FivePointTwo => "gpt-5.2",
             Self::FivePointTwoCodex => "gpt-5.2-codex",
             Self::FivePointThreeCodex => "gpt-5.3-codex",
+            Self::FivePointFour => "gpt-5.4",
+            Self::FivePointFourPro => "gpt-5.4-pro",
             Self::Custom { name, .. } => name,
         }
     }
@@ -175,6 +183,8 @@ impl Model {
             Self::FivePointTwo => "gpt-5.2",
             Self::FivePointTwoCodex => "gpt-5.2-codex",
             Self::FivePointThreeCodex => "gpt-5.3-codex",
+            Self::FivePointFour => "gpt-5.4",
+            Self::FivePointFourPro => "gpt-5.4-pro",
             Self::Custom { display_name, .. } => display_name.as_deref().unwrap_or(&self.id()),
         }
     }
@@ -191,12 +201,14 @@ impl Model {
             Self::O3 => 200_000,
             Self::Five => 272_000,
             Self::FiveCodex => 272_000,
-            Self::FiveMini => 272_000,
-            Self::FiveNano => 272_000,
+            Self::FiveMini => 400_000,
+            Self::FiveNano => 400_000,
             Self::FivePointOne => 400_000,
             Self::FivePointTwo => 400_000,
             Self::FivePointTwoCodex => 400_000,
             Self::FivePointThreeCodex => 400_000,
+            Self::FivePointFour => 1_050_000,
+            Self::FivePointFourPro => 1_050_000,
             Self::Custom { max_tokens, .. } => *max_tokens,
         }
     }
@@ -222,6 +234,8 @@ impl Model {
             Self::FivePointTwo => Some(128_000),
             Self::FivePointTwoCodex => Some(128_000),
             Self::FivePointThreeCodex => Some(128_000),
+            Self::FivePointFour => Some(128_000),
+            Self::FivePointFourPro => Some(128_000),
         }
     }
 
@@ -230,7 +244,7 @@ impl Model {
             Self::Custom {
                 reasoning_effort, ..
             } => reasoning_effort.to_owned(),
-            Self::FivePointThreeCodex => Some(ReasoningEffort::Medium),
+            Self::FivePointThreeCodex | Self::FivePointFourPro => Some(ReasoningEffort::Medium),
             _ => None,
         }
     }
@@ -241,7 +255,10 @@ impl Model {
                 supports_chat_completions,
                 ..
             } => *supports_chat_completions,
-            Self::FiveCodex | Self::FivePointTwoCodex | Self::FivePointThreeCodex => false,
+            Self::FiveCodex
+            | Self::FivePointTwoCodex
+            | Self::FivePointThreeCodex
+            | Self::FivePointFourPro => false,
             _ => true,
         }
     }
@@ -263,6 +280,8 @@ impl Model {
             | Self::FivePointTwo
             | Self::FivePointTwoCodex
             | Self::FivePointThreeCodex
+            | Self::FivePointFour
+            | Self::FivePointFourPro
             | Self::FiveNano => true,
             Self::O1 | Self::O3 | Self::O3Mini | Model::Custom { .. } => false,
         }