cherry pick v0.213.x 6631d8be (#43493)

Created by Richard Feldman

- **Fix Gemini 3 on OpenRouter (#43416)**
- **Add Gemini 3 support to Copilot (#43096)**
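
Both fixes hinge on the same mechanism: providers stream an opaque reasoning_details payload that must be stored on the assistant message and echoed back verbatim on the next request. A minimal sketch of the storage guard the agent applies (stand-in types, not the real ones from crates/agent and crates/language_model; only serde_json is assumed):

    use serde_json::{json, Value};

    // Stand-ins for the real event and message types; the actual structs
    // carry many more fields.
    enum CompletionEvent {
        Text(String),
        ReasoningDetails(Value),
    }

    #[derive(Default)]
    struct AgentMessage {
        content: Vec<String>,
        reasoning_details: Option<Value>,
    }

    fn handle_event(message: &mut AgentMessage, event: CompletionEvent) {
        match event {
            CompletionEvent::Text(text) => message.content.push(text),
            CompletionEvent::ReasoningDetails(details) => {
                // Keep the last non-empty payload: OpenRouter streams the
                // encrypted reasoning first and an empty array at stream end,
                // and the empty array must not clobber the earlier data.
                let is_empty_array =
                    matches!(&details, Value::Array(items) if items.is_empty());
                if !is_empty_array {
                    message.reasoning_details = Some(details);
                }
            }
        }
    }

    fn main() {
        let mut message = AgentMessage::default();
        handle_event(&mut message, CompletionEvent::Text("Done".into()));
        handle_event(
            &mut message,
            CompletionEvent::ReasoningDetails(json!([
                { "type": "reasoning.encrypted", "data": "opaque-payload" }
            ])),
        );
        handle_event(&mut message, CompletionEvent::ReasoningDetails(json!([])));
        assert!(message.reasoning_details.is_some()); // encrypted data survives
    }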

Change summary

crates/agent/src/db.rs                              |   1 
crates/agent/src/edit_agent.rs                      |   1 
crates/agent/src/edit_agent/evals.rs                |   3 
crates/agent/src/tests/mod.rs                       |  87 +++-
crates/agent/src/thread.rs                          |  20 +
crates/agent_ui/src/buffer_codegen.rs               |   1 
crates/agent_ui/src/terminal_inline_assistant.rs    |   1 
crates/assistant_text_thread/src/text_thread.rs     |   8 
crates/copilot/src/copilot_chat.rs                  |  23 
crates/copilot/src/copilot_responses.rs             |   7 
crates/eval/src/instance.rs                         |   8 
crates/git_ui/src/git_panel.rs                      |   1 
crates/language_model/src/language_model.rs         |   6 
crates/language_model/src/request.rs                |   2 
crates/language_models/src/provider/anthropic.rs    |   1 
crates/language_models/src/provider/copilot_chat.rs | 172 +++++++++
crates/language_models/src/provider/google.rs       |   4 
crates/language_models/src/provider/mistral.rs      |   3 
crates/language_models/src/provider/open_ai.rs      |   1 
crates/language_models/src/provider/open_router.rs  | 267 ++++++++++++++
crates/open_router/src/open_router.rs               |   8 
crates/rules_library/src/rules_library.rs           |   1 
22 files changed, 580 insertions(+), 46 deletions(-)

Detailed changes

crates/agent/src/db.rs

@@ -182,6 +182,7 @@ impl DbThread {
                     crate::Message::Agent(AgentMessage {
                         content,
                         tool_results,
+                        reasoning_details: None,
                     })
                 }
                 language_model::Role::System => {

crates/agent/src/edit_agent.rs

@@ -703,6 +703,7 @@ impl EditAgent {
             role: Role::User,
             content: vec![MessageContent::Text(prompt)],
             cache: false,
+            reasoning_details: None,
         });
 
         // Include tools in the request so that we can take advantage of

crates/agent/src/edit_agent/evals.rs

@@ -1081,6 +1081,7 @@ fn message(
         role,
         content: contents.into_iter().collect(),
         cache: false,
+        reasoning_details: None,
     }
 }
 
@@ -1268,6 +1269,7 @@ impl EvalAssertion {
                     role: Role::User,
                     content: vec![prompt.into()],
                     cache: false,
+                    reasoning_details: None,
                 }],
                 thinking_allowed: true,
                 ..Default::default()
@@ -1594,6 +1596,7 @@ impl EditAgentTest {
                 role: Role::System,
                 content: vec![MessageContent::Text(system_prompt)],
                 cache: true,
+                reasoning_details: None,
             }]
             .into_iter()
             .chain(eval.conversation)

crates/agent/src/tests/mod.rs

@@ -215,7 +215,8 @@ async fn test_prompt_caching(cx: &mut TestAppContext) {
         vec![LanguageModelRequestMessage {
             role: Role::User,
             content: vec!["Message 1".into()],
-            cache: true
+            cache: true,
+            reasoning_details: None,
         }]
     );
     fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Text(
@@ -239,17 +240,20 @@ async fn test_prompt_caching(cx: &mut TestAppContext) {
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["Message 1".into()],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::Assistant,
                 content: vec!["Response to Message 1".into()],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["Message 2".into()],
-                cache: true
+                cache: true,
+                reasoning_details: None,
             }
         ]
     );
@@ -295,37 +299,44 @@ async fn test_prompt_caching(cx: &mut TestAppContext) {
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["Message 1".into()],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::Assistant,
                 content: vec!["Response to Message 1".into()],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["Message 2".into()],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::Assistant,
                 content: vec!["Response to Message 2".into()],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["Use the echo tool".into()],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::Assistant,
                 content: vec![MessageContent::ToolUse(tool_use)],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec![MessageContent::ToolResult(tool_result)],
-                cache: true
+                cache: true,
+                reasoning_details: None,
             }
         ]
     );
@@ -648,17 +659,20 @@ async fn test_resume_after_tool_use_limit(cx: &mut TestAppContext) {
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["abc".into()],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::Assistant,
                 content: vec![MessageContent::ToolUse(tool_use.clone())],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec![MessageContent::ToolResult(tool_result.clone())],
-                cache: true
+                cache: true,
+                reasoning_details: None,
             },
         ]
     );
@@ -684,22 +698,26 @@ async fn test_resume_after_tool_use_limit(cx: &mut TestAppContext) {
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["abc".into()],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::Assistant,
                 content: vec![MessageContent::ToolUse(tool_use)],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec![MessageContent::ToolResult(tool_result)],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["Continue where you left off".into()],
-                cache: true
+                cache: true,
+                reasoning_details: None,
             }
         ]
     );
@@ -773,22 +791,26 @@ async fn test_send_after_tool_use_limit(cx: &mut TestAppContext) {
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["abc".into()],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::Assistant,
                 content: vec![MessageContent::ToolUse(tool_use)],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec![MessageContent::ToolResult(tool_result)],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["ghi".into()],
-                cache: true
+                cache: true,
+                reasoning_details: None,
             }
         ]
     );
@@ -1831,7 +1853,8 @@ async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) {
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["Hey!".into()],
-                cache: true
+                cache: true,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::Assistant,
@@ -1839,7 +1862,8 @@ async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) {
                     MessageContent::Text("Hi!".into()),
                     MessageContent::ToolUse(echo_tool_use.clone())
                 ],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::User,
@@ -1850,7 +1874,8 @@ async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) {
                     content: "test".into(),
                     output: Some("test".into())
                 })],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
         ],
     );
@@ -2248,12 +2273,14 @@ async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) {
             LanguageModelRequestMessage {
                 role: Role::User,
                 content: vec!["Call the echo tool!".into()],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::Assistant,
                 content: vec![language_model::MessageContent::ToolUse(tool_use_1.clone())],
-                cache: false
+                cache: false,
+                reasoning_details: None,
             },
             LanguageModelRequestMessage {
                 role: Role::User,
@@ -2266,7 +2293,8 @@ async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) {
                         output: Some("test".into())
                     }
                 )],
-                cache: true
+                cache: true,
+                reasoning_details: None,
             },
         ]
     );
@@ -2280,7 +2308,8 @@ async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) {
             thread.last_message(),
             Some(Message::Agent(AgentMessage {
                 content: vec![AgentMessageContent::Text("Done".into())],
-                tool_results: IndexMap::default()
+                tool_results: IndexMap::default(),
+                reasoning_details: None,
             }))
         );
     })

crates/agent/src/thread.rs

@@ -113,6 +113,7 @@ impl Message {
                 role: Role::User,
                 content: vec!["Continue where you left off".into()],
                 cache: false,
+                reasoning_details: None,
             }],
         }
     }
@@ -177,6 +178,7 @@ impl UserMessage {
             role: Role::User,
             content: Vec::with_capacity(self.content.len()),
             cache: false,
+            reasoning_details: None,
         };
 
         const OPEN_CONTEXT: &str = "<context>\n\
@@ -444,6 +446,7 @@ impl AgentMessage {
             role: Role::Assistant,
             content: Vec::with_capacity(self.content.len()),
             cache: false,
+            reasoning_details: self.reasoning_details.clone(),
         };
         for chunk in &self.content {
             match chunk {
@@ -479,6 +482,7 @@ impl AgentMessage {
             role: Role::User,
             content: Vec::new(),
             cache: false,
+            reasoning_details: None,
         };
 
         for tool_result in self.tool_results.values() {
@@ -508,6 +512,7 @@ impl AgentMessage {
 pub struct AgentMessage {
     pub content: Vec<AgentMessageContent>,
     pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
+    pub reasoning_details: Option<serde_json::Value>,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -1393,6 +1398,18 @@ impl Thread {
                 self.handle_thinking_event(text, signature, event_stream, cx)
             }
             RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
+            ReasoningDetails(details) => {
+                let last_message = self.pending_message();
+                // Store the last non-empty reasoning_details (overwrites earlier ones)
+                // This ensures we keep the encrypted reasoning with signatures, not the early text reasoning
+                if let serde_json::Value::Array(ref arr) = details {
+                    if !arr.is_empty() {
+                        last_message.reasoning_details = Some(details);
+                    }
+                } else {
+                    last_message.reasoning_details = Some(details);
+                }
+            }
             ToolUse(tool_use) => {
                 return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
             }
@@ -1672,6 +1689,7 @@ impl Thread {
             role: Role::User,
             content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
             cache: false,
+            reasoning_details: None,
         });
 
         let task = cx
@@ -1738,6 +1756,7 @@ impl Thread {
             role: Role::User,
             content: vec![SUMMARIZE_THREAD_PROMPT.into()],
             cache: false,
+            reasoning_details: None,
         });
         self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
             let mut title = String::new();
@@ -1987,6 +2006,7 @@ impl Thread {
             role: Role::System,
             content: vec![system_prompt.into()],
             cache: false,
+            reasoning_details: None,
         }];
         for message in &self.messages {
             messages.extend(message.to_request());

crates/assistant_text_thread/src/text_thread.rs

@@ -1416,6 +1416,7 @@ impl TextThread {
                 role: Role::User,
                 content: vec!["Respond only with OK, nothing else.".into()],
                 cache: false,
+                reasoning_details: None,
             });
             req
         };
@@ -2083,6 +2084,11 @@ impl TextThread {
                                         }
                                     }
                                     LanguageModelCompletionEvent::StartMessage { .. } => {}
+                                    LanguageModelCompletionEvent::ReasoningDetails(_) => {
+                                        // ReasoningDetails are metadata (signatures, encrypted data, format info)
+                                        // used for request/response validation, not UI content.
+                                        // The displayable thinking text is already handled by the Thinking event.
+                                    }
                                     LanguageModelCompletionEvent::Stop(reason) => {
                                         stop_reason = reason;
                                     }
@@ -2306,6 +2312,7 @@ impl TextThread {
                 role: message.role,
                 content: Vec::new(),
                 cache: message.cache.as_ref().is_some_and(|cache| cache.is_anchor),
+                reasoning_details: None,
             };
 
             while let Some(content) = contents.peek() {
@@ -2677,6 +2684,7 @@ impl TextThread {
                 role: Role::User,
                 content: vec![SUMMARIZE_THREAD_PROMPT.into()],
                 cache: false,
+                reasoning_details: None,
             });
 
             // If there is no summary, it is set with `done: false` so that "Loading Summary…" can

crates/copilot/src/copilot_chat.rs

@@ -294,6 +294,10 @@ pub enum ChatMessage {
         content: ChatMessageContent,
         #[serde(default, skip_serializing_if = "Vec::is_empty")]
         tool_calls: Vec<ToolCall>,
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        reasoning_opaque: Option<String>,
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        reasoning_text: Option<String>,
     },
     User {
         content: ChatMessageContent,
@@ -353,6 +357,8 @@ pub enum ToolCallContent {
 pub struct FunctionContent {
     pub name: String,
     pub arguments: String,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub thought_signature: Option<String>,
 }
 
 #[derive(Deserialize, Debug)]
@@ -384,6 +390,8 @@ pub struct ResponseDelta {
     pub role: Option<Role>,
     #[serde(default)]
     pub tool_calls: Vec<ToolCallChunk>,
+    pub reasoning_opaque: Option<String>,
+    pub reasoning_text: Option<String>,
 }
 #[derive(Deserialize, Debug, Eq, PartialEq)]
 pub struct ToolCallChunk {
@@ -396,6 +404,7 @@ pub struct ToolCallChunk {
 pub struct FunctionChunk {
     pub name: Option<String>,
     pub arguments: Option<String>,
+    pub thought_signature: Option<String>,
 }
 
 #[derive(Deserialize)]
@@ -783,13 +792,13 @@ async fn stream_completion(
     is_user_initiated: bool,
 ) -> Result<BoxStream<'static, Result<ResponseEvent>>> {
     let is_vision_request = request.messages.iter().any(|message| match message {
-      ChatMessage::User { content }
-      | ChatMessage::Assistant { content, .. }
-      | ChatMessage::Tool { content, .. } => {
-          matches!(content, ChatMessageContent::Multipart(parts) if parts.iter().any(|part| matches!(part, ChatMessagePart::Image { .. })))
-      }
-      _ => false,
-  });
+        ChatMessage::User { content }
+        | ChatMessage::Assistant { content, .. }
+        | ChatMessage::Tool { content, .. } => {
+            matches!(content, ChatMessageContent::Multipart(parts) if parts.iter().any(|part| matches!(part, ChatMessagePart::Image { .. })))
+        }
+        _ => false,
+    });
 
     let request_initiator = if is_user_initiated { "user" } else { "agent" };
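
On the Copilot Chat wire format, the new reasoning_opaque and reasoning_text fields are optional and skipped when absent, so requests for models without reasoning serialize exactly as before. A rough illustration of that serde behavior, using a hypothetical struct that mirrors just the fields added above:

    use serde::{Deserialize, Serialize};
    use serde_json::json;

    // Hypothetical mirror of the fields added to ChatMessage::Assistant in
    // crates/copilot/src/copilot_chat.rs.
    #[derive(Serialize, Deserialize)]
    struct AssistantMessage {
        content: String,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        reasoning_opaque: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        reasoning_text: Option<String>,
    }

    fn main() {
        let plain = AssistantMessage {
            content: "Hi".into(),
            reasoning_opaque: None,
            reasoning_text: None,
        };
        // None fields are omitted entirely, leaving old requests unchanged.
        assert_eq!(
            serde_json::to_value(&plain).unwrap(),
            json!({ "content": "Hi" })
        );

        let with_reasoning = AssistantMessage {
            content: "Hi".into(),
            reasoning_opaque: Some("opaque-token".into()),
            reasoning_text: None,
        };
        assert_eq!(
            serde_json::to_value(&with_reasoning).unwrap(),
            json!({ "content": "Hi", "reasoning_opaque": "opaque-token" })
        );
    }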
 

crates/copilot/src/copilot_responses.rs

@@ -127,6 +127,8 @@ pub enum ResponseInputItem {
         arguments: String,
         #[serde(skip_serializing_if = "Option::is_none")]
         status: Option<ItemStatus>,
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        thought_signature: Option<String>,
     },
     FunctionCallOutput {
         call_id: String,
@@ -251,6 +253,8 @@ pub enum ResponseOutputItem {
         arguments: String,
         #[serde(skip_serializing_if = "Option::is_none")]
         status: Option<ItemStatus>,
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        thought_signature: Option<String>,
     },
     Reasoning {
         id: String,
@@ -309,7 +313,8 @@ pub async fn stream_response(
     };
 
     let is_streaming = request.stream;
-    let request = request_builder.body(AsyncBody::from(serde_json::to_string(&request)?))?;
+    let json = serde_json::to_string(&request)?;
+    let request = request_builder.body(AsyncBody::from(json))?;
     let mut response = client.send(request).await?;
 
     if !response.status().is_success() {

crates/eval/src/instance.rs

@@ -553,6 +553,7 @@ impl ExampleInstance {
                     role: Role::User,
                     content: vec![MessageContent::Text(to_prompt(assertion.description))],
                     cache: false,
+                    reasoning_details: None,
                 }],
                 temperature: None,
                 tools: Vec::new(),
@@ -1252,7 +1253,8 @@ pub fn response_events_to_markdown(
             Ok(
                 LanguageModelCompletionEvent::UsageUpdate(_)
                 | LanguageModelCompletionEvent::StartMessage { .. }
-                | LanguageModelCompletionEvent::StatusUpdate { .. },
+                | LanguageModelCompletionEvent::StatusUpdate(_)
+                | LanguageModelCompletionEvent::ReasoningDetails(_),
             ) => {}
             Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                 json_parse_error, ..
@@ -1337,8 +1339,9 @@ impl ThreadDialog {
                 // Skip these
                 Ok(LanguageModelCompletionEvent::UsageUpdate(_))
                 | Ok(LanguageModelCompletionEvent::RedactedThinking { .. })
-                | Ok(LanguageModelCompletionEvent::StatusUpdate { .. })
+                | Ok(LanguageModelCompletionEvent::StatusUpdate(_))
                 | Ok(LanguageModelCompletionEvent::StartMessage { .. })
+                | Ok(LanguageModelCompletionEvent::ReasoningDetails(_))
                 | Ok(LanguageModelCompletionEvent::Stop(_)) => {}
 
                 Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
@@ -1366,6 +1369,7 @@ impl ThreadDialog {
                 role: Role::Assistant,
                 content,
                 cache: false,
+                reasoning_details: None,
             })
         } else {
             None

crates/git_ui/src/git_panel.rs

@@ -1864,6 +1864,7 @@ impl GitPanel {
                         role: Role::User,
                         content: vec![content.into()],
                         cache: false,
+                        reasoning_details: None,
                     }],
                     tools: Vec::new(),
                     tool_choice: None,

crates/language_model/src/language_model.rs

@@ -90,6 +90,7 @@ pub enum LanguageModelCompletionEvent {
     StartMessage {
         message_id: String,
     },
+    ReasoningDetails(serde_json::Value),
     UsageUpdate(TokenUsage),
 }
 
@@ -617,6 +618,7 @@ pub trait LanguageModel: Send + Sync {
                                 Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                 Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
                                 Ok(LanguageModelCompletionEvent::RedactedThinking { .. }) => None,
+                                Ok(LanguageModelCompletionEvent::ReasoningDetails(_)) => None,
                                 Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                 Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                 Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
@@ -971,8 +973,8 @@ mod tests {
         let original = LanguageModelToolUse {
             id: LanguageModelToolUseId::from("no_sig_id"),
             name: "no_sig_tool".into(),
-            raw_input: json!({"key": "value"}).to_string(),
-            input: json!({"key": "value"}),
+            raw_input: json!({"arg": "value"}).to_string(),
+            input: json!({"arg": "value"}),
             is_input_complete: true,
             thought_signature: None,
         };

crates/language_model/src/request.rs

@@ -357,6 +357,8 @@ pub struct LanguageModelRequestMessage {
     pub role: Role,
     pub content: Vec<MessageContent>,
     pub cache: bool,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub reasoning_details: Option<serde_json::Value>,
 }
 
 impl LanguageModelRequestMessage {

crates/language_models/src/provider/copilot_chat.rs

@@ -359,17 +359,22 @@ pub fn map_to_language_model_completion_events(
         id: String,
         name: String,
         arguments: String,
+        thought_signature: Option<String>,
     }
 
     struct State {
         events: Pin<Box<dyn Send + Stream<Item = Result<ResponseEvent>>>>,
         tool_calls_by_index: HashMap<usize, RawToolCall>,
+        reasoning_opaque: Option<String>,
+        reasoning_text: Option<String>,
     }
 
     futures::stream::unfold(
         State {
             events,
             tool_calls_by_index: HashMap::default(),
+            reasoning_opaque: None,
+            reasoning_text: None,
         },
         move |mut state| async move {
             if let Some(event) = state.events.next().await {
@@ -400,6 +405,14 @@ pub fn map_to_language_model_completion_events(
                             events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                         }
 
+                        // Capture reasoning data from the delta (e.g. for Gemini 3)
+                        if let Some(opaque) = delta.reasoning_opaque.clone() {
+                            state.reasoning_opaque = Some(opaque);
+                        }
+                        if let Some(text) = delta.reasoning_text.clone() {
+                            state.reasoning_text = Some(text);
+                        }
+
                         for (index, tool_call) in delta.tool_calls.iter().enumerate() {
                             let tool_index = tool_call.index.unwrap_or(index);
                             let entry = state.tool_calls_by_index.entry(tool_index).or_default();
@@ -416,6 +429,11 @@ pub fn map_to_language_model_completion_events(
                                 if let Some(arguments) = function.arguments.clone() {
                                     entry.arguments.push_str(&arguments);
                                 }
+
+                                if let Some(thought_signature) = function.thought_signature.clone()
+                                {
+                                    entry.thought_signature = Some(thought_signature);
+                                }
                             }
                         }
 
@@ -437,6 +455,32 @@ pub fn map_to_language_model_completion_events(
                                 )));
                             }
                             Some("tool_calls") => {
+                                // Gemini 3 models send reasoning_opaque/reasoning_text that must
+                                // be preserved and sent back in subsequent requests. Emit as
+                                // ReasoningDetails so the agent stores it in the message.
+                                if state.reasoning_opaque.is_some()
+                                    || state.reasoning_text.is_some()
+                                {
+                                    let mut details = serde_json::Map::new();
+                                    if let Some(opaque) = state.reasoning_opaque.take() {
+                                        details.insert(
+                                            "reasoning_opaque".to_string(),
+                                            serde_json::Value::String(opaque),
+                                        );
+                                    }
+                                    if let Some(text) = state.reasoning_text.take() {
+                                        details.insert(
+                                            "reasoning_text".to_string(),
+                                            serde_json::Value::String(text),
+                                        );
+                                    }
+                                    events.push(Ok(
+                                        LanguageModelCompletionEvent::ReasoningDetails(
+                                            serde_json::Value::Object(details),
+                                        ),
+                                    ));
+                                }
+
                                 events.extend(state.tool_calls_by_index.drain().map(
                                     |(_, tool_call)| {
                                         // The model can output an empty string
@@ -456,7 +500,7 @@ pub fn map_to_language_model_completion_events(
                                                 is_input_complete: true,
                                                 input,
                                                 raw_input: tool_call.arguments,
-                                                thought_signature: None,
+                                                thought_signature: tool_call.thought_signature,
                                             },
                                         )),
                                         Err(error) => Ok(
@@ -548,6 +592,7 @@ impl CopilotResponsesEventMapper {
                     call_id,
                     name,
                     arguments,
+                    thought_signature,
                     ..
                 } => {
                     let mut events = Vec::new();
@@ -559,7 +604,7 @@ impl CopilotResponsesEventMapper {
                                 is_input_complete: true,
                                 input,
                                 raw_input: arguments.clone(),
-                                thought_signature: None,
+                                thought_signature,
                             },
                         ))),
                         Err(error) => {
@@ -774,6 +819,7 @@ fn into_copilot_chat(
                                 function: copilot::copilot_chat::FunctionContent {
                                     name: tool_use.name.to_string(),
                                     arguments: serde_json::to_string(&tool_use.input)?,
+                                    thought_signature: tool_use.thought_signature.clone(),
                                 },
                             },
                         });
@@ -797,6 +843,22 @@ fn into_copilot_chat(
                     buffer
                 };
 
+                // Extract reasoning_opaque and reasoning_text from reasoning_details
+                let (reasoning_opaque, reasoning_text) =
+                    if let Some(details) = &message.reasoning_details {
+                        let opaque = details
+                            .get("reasoning_opaque")
+                            .and_then(|v| v.as_str())
+                            .map(|s| s.to_string());
+                        let text = details
+                            .get("reasoning_text")
+                            .and_then(|v| v.as_str())
+                            .map(|s| s.to_string());
+                        (opaque, text)
+                    } else {
+                        (None, None)
+                    };
+
                 messages.push(ChatMessage::Assistant {
                     content: if text_content.is_empty() {
                         ChatMessageContent::empty()
@@ -804,6 +866,8 @@ fn into_copilot_chat(
                         text_content.into()
                     },
                     tool_calls,
+                    reasoning_opaque,
+                    reasoning_text,
                 });
             }
             Role::System => messages.push(ChatMessage::System {
@@ -948,6 +1012,7 @@ fn into_copilot_responses(
                             name: tool_use.name.to_string(),
                             arguments: tool_use.raw_input.clone(),
                             status: None,
+                            thought_signature: tool_use.thought_signature.clone(),
                         });
                     }
                 }
@@ -1120,6 +1185,7 @@ mod tests {
                 name: "do_it".into(),
                 arguments: "{\"x\":1}".into(),
                 status: None,
+                thought_signature: None,
             },
         }];
 
@@ -1145,6 +1211,7 @@ mod tests {
                 name: "do_it".into(),
                 arguments: "{not json}".into(),
                 status: None,
+                thought_signature: None,
             },
         }];
 
@@ -1248,6 +1315,7 @@ mod tests {
                     name: "do_it".into(),
                     arguments: "{}".into(),
                     status: None,
+                    thought_signature: None,
                 },
             },
             responses::StreamEvent::Completed {
@@ -1303,6 +1371,106 @@ mod tests {
             other => panic!("expected HttpResponseError, got {:?}", other),
         }
     }
+
+    #[test]
+    fn chat_completions_stream_maps_reasoning_data() {
+        use copilot::copilot_chat::ResponseEvent;
+
+        let events = vec![
+            ResponseEvent {
+                choices: vec![copilot::copilot_chat::ResponseChoice {
+                    index: Some(0),
+                    finish_reason: None,
+                    delta: Some(copilot::copilot_chat::ResponseDelta {
+                        content: None,
+                        role: Some(copilot::copilot_chat::Role::Assistant),
+                        tool_calls: vec![copilot::copilot_chat::ToolCallChunk {
+                            index: Some(0),
+                            id: Some("call_abc123".to_string()),
+                            function: Some(copilot::copilot_chat::FunctionChunk {
+                                name: Some("list_directory".to_string()),
+                                arguments: Some("{\"path\":\"test\"}".to_string()),
+                                thought_signature: None,
+                            }),
+                        }],
+                        reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()),
+                        reasoning_text: Some("Let me check the directory".to_string()),
+                    }),
+                    message: None,
+                }],
+                id: "chatcmpl-123".to_string(),
+                usage: None,
+            },
+            ResponseEvent {
+                choices: vec![copilot::copilot_chat::ResponseChoice {
+                    index: Some(0),
+                    finish_reason: Some("tool_calls".to_string()),
+                    delta: Some(copilot::copilot_chat::ResponseDelta {
+                        content: None,
+                        role: None,
+                        tool_calls: vec![],
+                        reasoning_opaque: None,
+                        reasoning_text: None,
+                    }),
+                    message: None,
+                }],
+                id: "chatcmpl-123".to_string(),
+                usage: None,
+            },
+        ];
+
+        let mapped = futures::executor::block_on(async {
+            map_to_language_model_completion_events(
+                Box::pin(futures::stream::iter(events.into_iter().map(Ok))),
+                true,
+            )
+            .collect::<Vec<_>>()
+            .await
+        });
+
+        let mut has_reasoning_details = false;
+        let mut has_tool_use = false;
+        let mut reasoning_opaque_value: Option<String> = None;
+        let mut reasoning_text_value: Option<String> = None;
+
+        for event_result in mapped {
+            match event_result {
+                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
+                    has_reasoning_details = true;
+                    reasoning_opaque_value = details
+                        .get("reasoning_opaque")
+                        .and_then(|v| v.as_str())
+                        .map(|s| s.to_string());
+                    reasoning_text_value = details
+                        .get("reasoning_text")
+                        .and_then(|v| v.as_str())
+                        .map(|s| s.to_string());
+                }
+                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
+                    has_tool_use = true;
+                    assert_eq!(tool_use.id.to_string(), "call_abc123");
+                    assert_eq!(tool_use.name.as_ref(), "list_directory");
+                }
+                _ => {}
+            }
+        }
+
+        assert!(
+            has_reasoning_details,
+            "Should emit ReasoningDetails event for Gemini 3 reasoning"
+        );
+        assert!(has_tool_use, "Should emit ToolUse event");
+        assert_eq!(
+            reasoning_opaque_value,
+            Some("encrypted_reasoning_token_xyz".to_string()),
+            "Should capture reasoning_opaque"
+        );
+        assert_eq!(
+            reasoning_text_value,
+            Some("Let me check the directory".to_string()),
+            "Should capture reasoning_text"
+        );
+    }
 }
 struct ConfigurationView {
     copilot_status: Option<copilot::Status>,

crates/language_models/src/provider/google.rs

@@ -1094,6 +1094,7 @@ mod tests {
                     role: Role::Assistant,
                     content: vec![MessageContent::ToolUse(tool_use)],
                     cache: false,
+                    reasoning_details: None,
                 }],
                 ..Default::default()
             },
@@ -1130,6 +1131,7 @@ mod tests {
                     role: Role::Assistant,
                     content: vec![MessageContent::ToolUse(tool_use)],
                     cache: false,
+                    reasoning_details: None,
                 }],
                 ..Default::default()
             },
@@ -1162,6 +1164,7 @@ mod tests {
                     role: Role::Assistant,
                     content: vec![MessageContent::ToolUse(tool_use)],
                     cache: false,
+                    reasoning_details: None,
                 }],
                 ..Default::default()
             },
@@ -1218,6 +1221,7 @@ mod tests {
                     role: Role::Assistant,
                     content: vec![MessageContent::ToolUse(tool_use)],
                     cache: false,
+                    reasoning_details: None,
                 }],
                 ..Default::default()
             },

crates/language_models/src/provider/mistral.rs

@@ -1025,11 +1025,13 @@ mod tests {
                     role: Role::System,
                     content: vec![MessageContent::Text("System prompt".into())],
                     cache: false,
+                    reasoning_details: None,
                 },
                 LanguageModelRequestMessage {
                     role: Role::User,
                     content: vec![MessageContent::Text("Hello".into())],
                     cache: false,
+                    reasoning_details: None,
                 },
             ],
             temperature: Some(0.5),
@@ -1064,6 +1066,7 @@ mod tests {
                     }),
                 ],
                 cache: false,
+                reasoning_details: None,
             }],
             tools: vec![],
             tool_choice: None,

crates/language_models/src/provider/open_router.rs

@@ -393,6 +393,7 @@ pub fn into_open_router(
 ) -> open_router::Request {
     let mut messages = Vec::new();
     for message in request.messages {
+        let reasoning_details = message.reasoning_details.clone();
         for content in message.content {
             match content {
                 MessageContent::Text(text) => add_message_content_part(
@@ -419,18 +420,26 @@ pub fn into_open_router(
                                 name: tool_use.name.to_string(),
                                 arguments: serde_json::to_string(&tool_use.input)
                                     .unwrap_or_default(),
+                                thought_signature: tool_use.thought_signature.clone(),
                             },
                         },
                     };
 
-                    if let Some(open_router::RequestMessage::Assistant { tool_calls, .. }) =
-                        messages.last_mut()
+                    if let Some(open_router::RequestMessage::Assistant {
+                        tool_calls,
+                        reasoning_details: existing_reasoning,
+                        ..
+                    }) = messages.last_mut()
                     {
                         tool_calls.push(tool_call);
+                        if existing_reasoning.is_none() && reasoning_details.is_some() {
+                            *existing_reasoning = reasoning_details.clone();
+                        }
                     } else {
                         messages.push(open_router::RequestMessage::Assistant {
                             content: None,
                             tool_calls: vec![tool_call],
+                            reasoning_details: reasoning_details.clone(),
                         });
                     }
                 }
@@ -529,6 +538,7 @@ fn add_message_content_part(
                 Role::Assistant => open_router::RequestMessage::Assistant {
                     content: Some(open_router::MessageContent::from(vec![new_part])),
                     tool_calls: Vec::new(),
+                    reasoning_details: None,
                 },
                 Role::System => open_router::RequestMessage::System {
                     content: open_router::MessageContent::from(vec![new_part]),
@@ -540,12 +550,14 @@ fn add_message_content_part(
 
 pub struct OpenRouterEventMapper {
     tool_calls_by_index: HashMap<usize, RawToolCall>,
+    reasoning_details: Option<serde_json::Value>,
 }
 
 impl OpenRouterEventMapper {
     pub fn new() -> Self {
         Self {
             tool_calls_by_index: HashMap::default(),
+            reasoning_details: None,
         }
     }
 
@@ -577,6 +589,15 @@ impl OpenRouterEventMapper {
         };
 
         let mut events = Vec::new();
+
+        if let Some(details) = choice.delta.reasoning_details.clone() {
+            // Emit reasoning_details immediately
+            events.push(Ok(LanguageModelCompletionEvent::ReasoningDetails(
+                details.clone(),
+            )));
+            self.reasoning_details = Some(details);
+        }
+
         if let Some(reasoning) = choice.delta.reasoning.clone() {
             events.push(Ok(LanguageModelCompletionEvent::Thinking {
                 text: reasoning,
@@ -608,6 +629,10 @@ impl OpenRouterEventMapper {
                     if let Some(arguments) = function.arguments.clone() {
                         entry.arguments.push_str(&arguments);
                     }
+
+                    if let Some(signature) = function.thought_signature.clone() {
+                        entry.thought_signature = Some(signature);
+                    }
                 }
             }
         }
@@ -623,6 +648,7 @@ impl OpenRouterEventMapper {
 
         match choice.finish_reason.as_deref() {
             Some("stop") => {
+                // Don't emit reasoning_details here - already emitted immediately when captured
                 events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
             }
             Some("tool_calls") => {
@@ -635,7 +661,7 @@ impl OpenRouterEventMapper {
                                 is_input_complete: true,
                                 input,
                                 raw_input: tool_call.arguments.clone(),
-                                thought_signature: None,
+                                thought_signature: tool_call.thought_signature.clone(),
                             },
                         )),
                         Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
@@ -647,10 +673,12 @@ impl OpenRouterEventMapper {
                     }
                 }));
 
+                // Don't emit reasoning_details here - already emitted immediately when captured
                 events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
             }
             Some(stop_reason) => {
                 log::error!("Unexpected OpenRouter stop_reason: {stop_reason:?}",);
+                // Don't emit reasoning_details here - already emitted immediately when captured
                 events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
             }
             None => {}
@@ -665,6 +693,7 @@ struct RawToolCall {
     id: String,
     name: String,
     arguments: String,
+    thought_signature: Option<String>,
 }
 
 pub fn count_open_router_tokens(
@@ -832,3 +861,235 @@ impl Render for ConfigurationView {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use open_router::{ChoiceDelta, FunctionChunk, ResponseMessageDelta, ToolCallChunk};
+
+    #[gpui::test]
+    async fn test_reasoning_details_preservation_with_tool_calls() {
+        // This test verifies that reasoning_details are properly captured and preserved
+        // when a model uses tool calling with reasoning/thinking tokens.
+        //
+        // The key regression this prevents:
+        // - OpenRouter sends multiple reasoning_details updates during streaming
+        // - First with actual content (encrypted reasoning data)
+        // - Then with empty array on completion
+        // - We must NOT overwrite the real data with the empty array
+
+        let mut mapper = OpenRouterEventMapper::new();
+
+        // Simulate the streaming events as they come from OpenRouter/Gemini
+        let events = vec![
+            // Event 1: Initial reasoning details with text
+            ResponseStreamEvent {
+                id: Some("response_123".into()),
+                created: 1234567890,
+                model: "google/gemini-3-pro-preview".into(),
+                choices: vec![ChoiceDelta {
+                    index: 0,
+                    delta: ResponseMessageDelta {
+                        role: None,
+                        content: None,
+                        reasoning: None,
+                        tool_calls: None,
+                        reasoning_details: Some(serde_json::json!([
+                            {
+                                "type": "reasoning.text",
+                                "text": "Let me analyze this request...",
+                                "format": "google-gemini-v1",
+                                "index": 0
+                            }
+                        ])),
+                    },
+                    finish_reason: None,
+                }],
+                usage: None,
+            },
+            // Event 2: More reasoning details
+            ResponseStreamEvent {
+                id: Some("response_123".into()),
+                created: 1234567890,
+                model: "google/gemini-3-pro-preview".into(),
+                choices: vec![ChoiceDelta {
+                    index: 0,
+                    delta: ResponseMessageDelta {
+                        role: None,
+                        content: None,
+                        reasoning: None,
+                        tool_calls: None,
+                        reasoning_details: Some(serde_json::json!([
+                            {
+                                "type": "reasoning.encrypted",
+                                "data": "EtgDCtUDAdHtim9OF5jm4aeZSBAtl/randomized123",
+                                "format": "google-gemini-v1",
+                                "index": 0,
+                                "id": "tool_call_abc123"
+                            }
+                        ])),
+                    },
+                    finish_reason: None,
+                }],
+                usage: None,
+            },
+            // Event 3: Tool call starts
+            ResponseStreamEvent {
+                id: Some("response_123".into()),
+                created: 1234567890,
+                model: "google/gemini-3-pro-preview".into(),
+                choices: vec![ChoiceDelta {
+                    index: 0,
+                    delta: ResponseMessageDelta {
+                        role: None,
+                        content: None,
+                        reasoning: None,
+                        tool_calls: Some(vec![ToolCallChunk {
+                            index: 0,
+                            id: Some("tool_call_abc123".into()),
+                            function: Some(FunctionChunk {
+                                name: Some("list_directory".into()),
+                                arguments: Some("{\"path\":\"test\"}".into()),
+                                thought_signature: Some("sha256:test_signature_xyz789".into()),
+                            }),
+                        }]),
+                        reasoning_details: None,
+                    },
+                    finish_reason: None,
+                }],
+                usage: None,
+            },
+            // Event 4: Empty reasoning_details on tool_calls finish
+            // This is the critical event - we must not overwrite with this empty array!
+            ResponseStreamEvent {
+                id: Some("response_123".into()),
+                created: 1234567890,
+                model: "google/gemini-3-pro-preview".into(),
+                choices: vec![ChoiceDelta {
+                    index: 0,
+                    delta: ResponseMessageDelta {
+                        role: None,
+                        content: None,
+                        reasoning: None,
+                        tool_calls: None,
+                        reasoning_details: Some(serde_json::json!([])),
+                    },
+                    finish_reason: Some("tool_calls".into()),
+                }],
+                usage: None,
+            },
+        ];
+
+        // Process all events
+        let mut collected_events = Vec::new();
+        for event in events {
+            let mapped = mapper.map_event(event);
+            collected_events.extend(mapped);
+        }
+
+        // Verify we got the expected events
+        let mut has_tool_use = false;
+        let mut reasoning_details_events = Vec::new();
+        let mut thought_signature_value = None;
+
+        for event_result in collected_events {
+            match event_result {
+                Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
+                    has_tool_use = true;
+                    assert_eq!(tool_use.id.to_string(), "tool_call_abc123");
+                    assert_eq!(tool_use.name.as_ref(), "list_directory");
+                    thought_signature_value = tool_use.thought_signature.clone();
+                }
+                Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
+                    reasoning_details_events.push(details);
+                }
+                _ => {}
+            }
+        }
+
+        // Assertions
+        assert!(has_tool_use, "Should have emitted ToolUse event");
+        assert!(
+            !reasoning_details_events.is_empty(),
+            "Should have emitted ReasoningDetails events"
+        );
+
+        // We should have received multiple reasoning_details events (text, encrypted, empty)
+        // The agent layer is responsible for keeping only the first non-empty one
+        assert!(
+            reasoning_details_events.len() >= 2,
+            "Should have multiple reasoning_details events from streaming"
+        );
+
+        // Verify at least one contains the encrypted data
+        let has_encrypted = reasoning_details_events.iter().any(|details| {
+            if let serde_json::Value::Array(arr) = details {
+                arr.iter().any(|item| {
+                    item["type"] == "reasoning.encrypted"
+                        && item["data"]
+                            .as_str()
+                            .map_or(false, |s| s.contains("EtgDCtUDAdHtim9OF5jm4aeZSBAtl"))
+                })
+            } else {
+                false
+            }
+        });
+        assert!(
+            has_encrypted,
+            "Should have at least one reasoning_details with encrypted data"
+        );
+
+        // Verify thought_signature was captured
+        assert!(
+            thought_signature_value.is_some(),
+            "Tool use should have thought_signature"
+        );
+        assert_eq!(
+            thought_signature_value.unwrap(),
+            "sha256:test_signature_xyz789"
+        );
+    }
+
+    #[gpui::test]
+    async fn test_agent_prevents_empty_reasoning_details_overwrite() {
+        // This test verifies that the agent layer prevents empty reasoning_details
+        // from overwriting non-empty ones, even though the mapper emits all events.
+
+        // Simulate what the agent does when it receives multiple ReasoningDetails events
+        let mut agent_reasoning_details: Option<serde_json::Value> = None;
+
+        let events = vec![
+            // First event: non-empty reasoning_details
+            serde_json::json!([
+                {
+                    "type": "reasoning.encrypted",
+                    "data": "real_data_here",
+                    "format": "google-gemini-v1"
+                }
+            ]),
+            // Second event: empty array (should not overwrite)
+            serde_json::json!([]),
+        ];
+
+        for details in events {
+            // This mimics the agent's logic: only store if we don't already have it
+            if agent_reasoning_details.is_none() {
+                agent_reasoning_details = Some(details);
+            }
+        }
+
+        // Verify the agent kept the first non-empty reasoning_details
+        assert!(agent_reasoning_details.is_some());
+        let final_details = agent_reasoning_details.unwrap();
+        if let serde_json::Value::Array(arr) = &final_details {
+            assert!(
+                !arr.is_empty(),
+                "Agent should have kept the non-empty reasoning_details"
+            );
+            assert_eq!(arr[0]["data"], "real_data_here");
+        } else {
+            panic!("Expected array");
+        }
+    }
+}

crates/open_router/src/open_router.rs

@@ -215,6 +215,8 @@ pub enum RequestMessage {
         content: Option<MessageContent>,
         #[serde(default, skip_serializing_if = "Vec::is_empty")]
         tool_calls: Vec<ToolCall>,
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        reasoning_details: Option<serde_json::Value>,
     },
     User {
         content: MessageContent,
@@ -341,6 +343,8 @@ pub enum ToolCallContent {
 pub struct FunctionContent {
     pub name: String,
     pub arguments: String,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub thought_signature: Option<String>,
 }
 
 #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
@@ -350,6 +354,8 @@ pub struct ResponseMessageDelta {
     pub reasoning: Option<String>,
     #[serde(default, skip_serializing_if = "is_none_or_empty")]
     pub tool_calls: Option<Vec<ToolCallChunk>>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub reasoning_details: Option<serde_json::Value>,
 }
 
 #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
@@ -363,6 +369,8 @@ pub struct ToolCallChunk {
 pub struct FunctionChunk {
     pub name: Option<String>,
     pub arguments: Option<String>,
+    #[serde(default)]
+    pub thought_signature: Option<String>,
 }
 
 #[derive(Serialize, Deserialize, Debug)]
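
Given these wire types, a streamed OpenRouter chunk carrying reasoning deserializes directly, and the payload stays raw JSON so it can be echoed back untouched. A quick sketch against an illustrative subset of ResponseMessageDelta (field names match the diff; the struct here is simplified):

    use serde::Deserialize;

    // Illustrative subset of open_router::ResponseMessageDelta.
    #[derive(Deserialize)]
    struct Delta {
        #[serde(default)]
        content: Option<String>,
        #[serde(default)]
        reasoning_details: Option<serde_json::Value>,
    }

    fn main() {
        let chunk = r#"{
            "reasoning_details": [
                { "type": "reasoning.encrypted",
                  "data": "abc123",
                  "format": "google-gemini-v1" }
            ]
        }"#;
        let delta: Delta = serde_json::from_str(chunk).unwrap();
        assert!(delta.content.is_none());
        // The payload stays a raw serde_json::Value; its contents are
        // provider-specific, so it is replayed on the next request as-is.
        assert!(delta.reasoning_details.is_some());
    }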