Remove deprecated Gemini 3 Pro Preview (#50503)

Created by Richard Feldman

Gemini 3 Pro Preview has been deprecated in favor of Gemini 3.1 Pro.
This removes the `Gemini3Pro` variant from the `Model` enum and all
associated match arms, updates eval model lists, docs, and test
fixtures.

A serde alias (`"gemini-3-pro-preview"`) is kept on `Gemini31Pro` so
existing user settings gracefully migrate to the replacement model.

Closes AI-66

Release Notes:

- Removed deprecated Gemini 3 Pro Preview model; existing configurations
automatically migrate to Gemini 3.1 Pro.

Change summary

.github/ISSUE_TEMPLATE/10_bug_report.yml             |  2 +-
.github/workflows/run_cron_unit_evals.yml            |  2 +-
crates/google_ai/src/google_ai.rs                    | 14 ++------------
crates/language_models/src/provider/open_router.rs   |  8 ++++----
crates/ui/src/components/callout.rs                  |  2 +-
docs/src/ai/models.md                                |  8 ++------
tooling/xtask/src/tasks/workflows/run_agent_evals.rs |  2 +-
7 files changed, 12 insertions(+), 26 deletions(-)

Detailed changes

.github/ISSUE_TEMPLATE/10_bug_report.yml 🔗

@@ -100,7 +100,7 @@ body:
       label: (for AI issues) Model provider details
       placeholder: |
         - Provider: (Anthropic via ZedPro, Anthropic via API key, Copilot Chat, Mistral, OpenAI, etc.)
-        - Model Name: (Claude Sonnet 4.5, Gemini 3 Pro, GPT-5)
+        - Model Name: (Claude Sonnet 4.5, Gemini 3.1 Pro, GPT-5)
         - Mode: (Agent Panel, Inline Assistant, Terminal Assistant or Text Threads)
         - Other details (ACPs, MCPs, other settings, etc.):
     validations:

.github/workflows/run_cron_unit_evals.yml 🔗

@@ -16,7 +16,7 @@ jobs:
         model:
         - anthropic/claude-sonnet-4-5-latest
         - anthropic/claude-opus-4-5-latest
-        - google/gemini-3-pro
+        - google/gemini-3.1-pro
         - openai/gpt-5
       fail-fast: false
     steps:

crates/google_ai/src/google_ai.rs 🔗

@@ -510,11 +510,9 @@ pub enum Model {
         alias = "gemini-2.5-pro-preview-06-05"
     )]
     Gemini25Pro,
-    #[serde(rename = "gemini-3-pro-preview")]
-    Gemini3Pro,
     #[serde(rename = "gemini-3-flash-preview")]
     Gemini3Flash,
-    #[serde(rename = "gemini-3.1-pro-preview")]
+    #[serde(rename = "gemini-3.1-pro-preview", alias = "gemini-3-pro-preview")]
     Gemini31Pro,
     #[serde(rename = "custom")]
     Custom {
@@ -537,7 +535,6 @@ impl Model {
             Self::Gemini25FlashLite => "gemini-2.5-flash-lite",
             Self::Gemini25Flash => "gemini-2.5-flash",
             Self::Gemini25Pro => "gemini-2.5-pro",
-            Self::Gemini3Pro => "gemini-3-pro-preview",
             Self::Gemini3Flash => "gemini-3-flash-preview",
             Self::Gemini31Pro => "gemini-3.1-pro-preview",
             Self::Custom { name, .. } => name,
@@ -548,7 +545,6 @@ impl Model {
             Self::Gemini25FlashLite => "gemini-2.5-flash-lite",
             Self::Gemini25Flash => "gemini-2.5-flash",
             Self::Gemini25Pro => "gemini-2.5-pro",
-            Self::Gemini3Pro => "gemini-3-pro-preview",
             Self::Gemini3Flash => "gemini-3-flash-preview",
             Self::Gemini31Pro => "gemini-3.1-pro-preview",
             Self::Custom { name, .. } => name,
@@ -560,7 +556,6 @@ impl Model {
             Self::Gemini25FlashLite => "Gemini 2.5 Flash-Lite",
             Self::Gemini25Flash => "Gemini 2.5 Flash",
             Self::Gemini25Pro => "Gemini 2.5 Pro",
-            Self::Gemini3Pro => "Gemini 3 Pro",
             Self::Gemini3Flash => "Gemini 3 Flash",
             Self::Gemini31Pro => "Gemini 3.1 Pro",
             Self::Custom {
@@ -574,7 +569,6 @@ impl Model {
             Self::Gemini25FlashLite
             | Self::Gemini25Flash
             | Self::Gemini25Pro
-            | Self::Gemini3Pro
             | Self::Gemini3Flash
             | Self::Gemini31Pro => 1_048_576,
             Self::Custom { max_tokens, .. } => *max_tokens,
@@ -586,7 +580,6 @@ impl Model {
             Model::Gemini25FlashLite
             | Model::Gemini25Flash
             | Model::Gemini25Pro
-            | Model::Gemini3Pro
             | Model::Gemini3Flash
             | Model::Gemini31Pro => Some(65_536),
             Model::Custom { .. } => None,
@@ -603,10 +596,7 @@ impl Model {
 
     pub fn mode(&self) -> GoogleModelMode {
         match self {
-            Self::Gemini25FlashLite
-            | Self::Gemini25Flash
-            | Self::Gemini25Pro
-            | Self::Gemini3Pro => {
+            Self::Gemini25FlashLite | Self::Gemini25Flash | Self::Gemini25Pro => {
                 GoogleModelMode::Thinking {
                     // By default these models are set to "auto", so we preserve that behavior
                     // but indicate they are capable of thinking mode

crates/language_models/src/provider/open_router.rs 🔗

@@ -889,7 +889,7 @@ mod tests {
             ResponseStreamEvent {
                 id: Some("response_123".into()),
                 created: 1234567890,
-                model: "google/gemini-3-pro-preview".into(),
+                model: "google/gemini-3.1-pro-preview".into(),
                 choices: vec![ChoiceDelta {
                     index: 0,
                     delta: ResponseMessageDelta {
@@ -914,7 +914,7 @@ mod tests {
             ResponseStreamEvent {
                 id: Some("response_123".into()),
                 created: 1234567890,
-                model: "google/gemini-3-pro-preview".into(),
+                model: "google/gemini-3.1-pro-preview".into(),
                 choices: vec![ChoiceDelta {
                     index: 0,
                     delta: ResponseMessageDelta {
@@ -940,7 +940,7 @@ mod tests {
             ResponseStreamEvent {
                 id: Some("response_123".into()),
                 created: 1234567890,
-                model: "google/gemini-3-pro-preview".into(),
+                model: "google/gemini-3.1-pro-preview".into(),
                 choices: vec![ChoiceDelta {
                     index: 0,
                     delta: ResponseMessageDelta {
@@ -967,7 +967,7 @@ mod tests {
             ResponseStreamEvent {
                 id: Some("response_123".into()),
                 created: 1234567890,
-                model: "google/gemini-3-pro-preview".into(),
+                model: "google/gemini-3.1-pro-preview".into(),
                 choices: vec![ChoiceDelta {
                     index: 0,
                     delta: ResponseMessageDelta {

crates/ui/src/components/callout.rs 🔗

@@ -295,7 +295,7 @@ impl Component for Callout {
                                 "Error details:",
                                 "• Quota exceeded for metric",
                                 "• Limit: 0",
-                                "• Model: gemini-3-pro",
+                                "• Model: gemini-3.1-pro",
                                 "Please retry in 26.33s.",
                                 "Additional details:",
                                 "- Request ID: abc123def456",

docs/src/ai/models.md 🔗

@@ -43,10 +43,6 @@ Zed's plans offer hosted versions of major LLMs with higher rate limits than dir
 |                        | OpenAI    | Cached Input        | $0.005                       | $0.0055                 |
 | Gemini 3.1 Pro         | Google    | Input               | $2.00                        | $2.20                   |
 |                        | Google    | Output              | $12.00                       | $13.20                  |
-| Gemini 3.1 Pro         | Google    | Input               | $2.00                        | $2.20                   |
-|                        | Google    | Output              | $12.00                       | $13.20                  |
-| Gemini 3 Pro           | Google    | Input               | $2.00                        | $2.20                   |
-|                        | Google    | Output              | $12.00                       | $13.20                  |
 | Gemini 3 Flash         | Google    | Input               | $0.30                        | $0.33                   |
 |                        | Google    | Output              | $2.50                        | $2.75                   |
 | Grok 4                 | X.ai      | Input               | $3.00                        | $3.30                   |
@@ -70,7 +66,8 @@ As of February 19, 2026, Zed Pro serves newer model versions in place of the ret
 - Claude Sonnet 4 → Claude Sonnet 4.5 or Claude Sonnet 4.6
 - Claude Sonnet 3.7 (retired Feb 19) → Claude Sonnet 4.5 or Claude Sonnet 4.6
 - GPT-5.1 and GPT-5 → GPT-5.2 or GPT-5.2 Codex
-- Gemini 2.5 Pro → Gemini 3 Pro or Gemini 3.1 Pro
+- Gemini 2.5 Pro → Gemini 3.1 Pro
+- Gemini 3 Pro → Gemini 3.1 Pro
 - Gemini 2.5 Flash → Gemini 3 Flash
 
 ## Usage {#usage}
@@ -95,7 +92,6 @@ A context window is the maximum span of text and code an LLM can consider at onc
 | GPT-5 mini        | OpenAI    | 400k                      |
 | GPT-5 nano        | OpenAI    | 400k                      |
 | Gemini 3.1 Pro    | Google    | 200k                      |
-| Gemini 3 Pro      | Google    | 200k                      |
 | Gemini 3 Flash    | Google    | 200k                      |
 
 > Context window limits for hosted Sonnet 4.5/4.6 and Gemini 3.1 Pro/3 Pro/Flash may increase in future releases.

tooling/xtask/src/tasks/workflows/run_agent_evals.rs 🔗

@@ -123,7 +123,7 @@ fn cron_unit_evals() -> NamedJob {
 const UNIT_EVAL_MODELS: &[&str] = &[
     "anthropic/claude-sonnet-4-5-latest",
     "anthropic/claude-opus-4-5-latest",
-    "google/gemini-3-pro",
+    "google/gemini-3.1-pro",
     "openai/gpt-5",
 ];