Detailed changes
@@ -182,6 +182,7 @@ impl DbThread {
crate::Message::Agent(AgentMessage {
content,
tool_results,
+ reasoning_details: None,
})
}
language_model::Role::System => {
@@ -703,6 +703,7 @@ impl EditAgent {
role: Role::User,
content: vec![MessageContent::Text(prompt)],
cache: false,
+ reasoning_details: None,
});
// Include tools in the request so that we can take advantage of
@@ -1081,6 +1081,7 @@ fn message(
role,
content: contents.into_iter().collect(),
cache: false,
+ reasoning_details: None,
}
}
@@ -1268,6 +1269,7 @@ impl EvalAssertion {
role: Role::User,
content: vec![prompt.into()],
cache: false,
+ reasoning_details: None,
}],
thinking_allowed: true,
..Default::default()
@@ -1594,6 +1596,7 @@ impl EditAgentTest {
role: Role::System,
content: vec![MessageContent::Text(system_prompt)],
cache: true,
+ reasoning_details: None,
}]
.into_iter()
.chain(eval.conversation)
@@ -215,7 +215,8 @@ async fn test_prompt_caching(cx: &mut TestAppContext) {
vec![LanguageModelRequestMessage {
role: Role::User,
content: vec!["Message 1".into()],
- cache: true
+ cache: true,
+ reasoning_details: None,
}]
);
fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Text(
@@ -239,17 +240,20 @@ async fn test_prompt_caching(cx: &mut TestAppContext) {
LanguageModelRequestMessage {
role: Role::User,
content: vec!["Message 1".into()],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::Assistant,
content: vec!["Response to Message 1".into()],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
content: vec!["Message 2".into()],
- cache: true
+ cache: true,
+ reasoning_details: None,
}
]
);
@@ -295,37 +299,44 @@ async fn test_prompt_caching(cx: &mut TestAppContext) {
LanguageModelRequestMessage {
role: Role::User,
content: vec!["Message 1".into()],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::Assistant,
content: vec!["Response to Message 1".into()],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
content: vec!["Message 2".into()],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::Assistant,
content: vec!["Response to Message 2".into()],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
content: vec!["Use the echo tool".into()],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::Assistant,
content: vec![MessageContent::ToolUse(tool_use)],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
content: vec![MessageContent::ToolResult(tool_result)],
- cache: true
+ cache: true,
+ reasoning_details: None,
}
]
);
@@ -648,17 +659,20 @@ async fn test_resume_after_tool_use_limit(cx: &mut TestAppContext) {
LanguageModelRequestMessage {
role: Role::User,
content: vec!["abc".into()],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::Assistant,
content: vec![MessageContent::ToolUse(tool_use.clone())],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
content: vec![MessageContent::ToolResult(tool_result.clone())],
- cache: true
+ cache: true,
+ reasoning_details: None,
},
]
);
@@ -682,22 +696,26 @@ async fn test_resume_after_tool_use_limit(cx: &mut TestAppContext) {
LanguageModelRequestMessage {
role: Role::User,
content: vec!["abc".into()],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::Assistant,
content: vec![MessageContent::ToolUse(tool_use)],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
content: vec![MessageContent::ToolResult(tool_result)],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
content: vec!["Continue where you left off".into()],
- cache: true
+ cache: true,
+ reasoning_details: None,
}
]
);
@@ -769,22 +787,26 @@ async fn test_send_after_tool_use_limit(cx: &mut TestAppContext) {
LanguageModelRequestMessage {
role: Role::User,
content: vec!["abc".into()],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::Assistant,
content: vec![MessageContent::ToolUse(tool_use)],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
content: vec![MessageContent::ToolResult(tool_result)],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
content: vec!["ghi".into()],
- cache: true
+ cache: true,
+ reasoning_details: None,
}
]
);
@@ -1827,7 +1849,8 @@ async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) {
LanguageModelRequestMessage {
role: Role::User,
content: vec!["Hey!".into()],
- cache: true
+ cache: true,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::Assistant,
@@ -1835,7 +1858,8 @@ async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) {
MessageContent::Text("Hi!".into()),
MessageContent::ToolUse(echo_tool_use.clone())
],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
@@ -1846,7 +1870,8 @@ async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) {
content: "test".into(),
output: Some("test".into())
})],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
],
);
@@ -2244,12 +2269,14 @@ async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) {
LanguageModelRequestMessage {
role: Role::User,
content: vec!["Call the echo tool!".into()],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::Assistant,
content: vec![language_model::MessageContent::ToolUse(tool_use_1.clone())],
- cache: false
+ cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
@@ -2262,7 +2289,8 @@ async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) {
output: Some("test".into())
}
)],
- cache: true
+ cache: true,
+ reasoning_details: None,
},
]
);
@@ -2276,7 +2304,8 @@ async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) {
thread.last_message(),
Some(Message::Agent(AgentMessage {
content: vec![AgentMessageContent::Text("Done".into())],
- tool_results: IndexMap::default()
+ tool_results: IndexMap::default(),
+ reasoning_details: None,
}))
);
})
@@ -113,6 +113,7 @@ impl Message {
role: Role::User,
content: vec!["Continue where you left off".into()],
cache: false,
+ reasoning_details: None,
}],
}
}
@@ -177,6 +178,7 @@ impl UserMessage {
role: Role::User,
content: Vec::with_capacity(self.content.len()),
cache: false,
+ reasoning_details: None,
};
const OPEN_CONTEXT: &str = "<context>\n\
@@ -444,6 +446,7 @@ impl AgentMessage {
role: Role::Assistant,
content: Vec::with_capacity(self.content.len()),
cache: false,
+ reasoning_details: self.reasoning_details.clone(),
};
for chunk in &self.content {
match chunk {
@@ -479,6 +482,7 @@ impl AgentMessage {
role: Role::User,
content: Vec::new(),
cache: false,
+ reasoning_details: None,
};
for tool_result in self.tool_results.values() {
@@ -508,6 +512,7 @@ impl AgentMessage {
pub struct AgentMessage {
pub content: Vec<AgentMessageContent>,
pub tool_results: IndexMap<LanguageModelToolUseId, LanguageModelToolResult>,
+ pub reasoning_details: Option<serde_json::Value>,
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -1398,6 +1403,18 @@ impl Thread {
self.handle_thinking_event(text, signature, event_stream, cx)
}
RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
+ ReasoningDetails(details) => {
+ let last_message = self.pending_message();
+ // Store the last non-empty reasoning_details (later events overwrite earlier ones).
+ // This ensures we keep the encrypted reasoning with signatures, not the early
+ // text-only reasoning, while an empty trailing array can never clobber real data.
+ if let serde_json::Value::Array(ref arr) = details {
+ if !arr.is_empty() {
+ last_message.reasoning_details = Some(details);
+ }
+ } else {
+ last_message.reasoning_details = Some(details);
+ }
+ }
ToolUse(tool_use) => {
return Ok(self.handle_tool_use_event(tool_use, event_stream, cx));
}
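
For reference, the retention policy in the handler above reduces to a small guard: ignore empty arrays, let anything else win. A minimal standalone sketch (the helper name merge_reasoning_details is hypothetical, not part of this change):

fn merge_reasoning_details(
    current: &mut Option<serde_json::Value>,
    incoming: serde_json::Value,
) {
    match &incoming {
        // An empty trailing `[]` from the provider never clobbers captured data.
        serde_json::Value::Array(arr) if arr.is_empty() => {}
        _ => *current = Some(incoming),
    }
}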
@@ -1673,6 +1690,7 @@ impl Thread {
role: Role::User,
content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()],
cache: false,
+ reasoning_details: None,
});
let task = cx
@@ -1737,6 +1755,7 @@ impl Thread {
role: Role::User,
content: vec![SUMMARIZE_THREAD_PROMPT.into()],
cache: false,
+ reasoning_details: None,
});
self.pending_title_generation = Some(cx.spawn(async move |this, cx| {
let mut title = String::new();
@@ -1984,6 +2003,7 @@ impl Thread {
role: Role::System,
content: vec![system_prompt.into()],
cache: false,
+ reasoning_details: None,
}];
for message in &self.messages {
messages.extend(message.to_request());
@@ -423,6 +423,7 @@ impl CodegenAlternative {
role: Role::User,
content: Vec::new(),
cache: false,
+ reasoning_details: None,
};
if let Some(context) = context_task.await {
@@ -262,6 +262,7 @@ impl TerminalInlineAssistant {
role: Role::User,
content: vec![],
cache: false,
+ reasoning_details: None,
};
if let Some(context) = load_context_task.await {
@@ -1417,6 +1417,7 @@ impl TextThread {
role: Role::User,
content: vec!["Respond only with OK, nothing else.".into()],
cache: false,
+ reasoning_details: None,
});
req
};
@@ -2085,6 +2086,11 @@ impl TextThread {
);
}
LanguageModelCompletionEvent::StartMessage { .. } => {}
+ LanguageModelCompletionEvent::ReasoningDetails(_) => {
+ // ReasoningDetails are metadata (signatures, encrypted data, format info)
+ // used for request/response validation, not UI content.
+ // The displayable thinking text is already handled by the Thinking event.
+ }
LanguageModelCompletionEvent::Stop(reason) => {
stop_reason = reason;
}
@@ -2308,6 +2314,7 @@ impl TextThread {
role: message.role,
content: Vec::new(),
cache: message.cache.as_ref().is_some_and(|cache| cache.is_anchor),
+ reasoning_details: None,
};
while let Some(content) = contents.peek() {
@@ -2679,6 +2686,7 @@ impl TextThread {
role: Role::User,
content: vec![SUMMARIZE_THREAD_PROMPT.into()],
cache: false,
+ reasoning_details: None,
});
// If there is no summary, it is set with `done: false` so that "Loading Summaryβ¦" can
@@ -353,6 +353,8 @@ pub enum ToolCallContent {
pub struct FunctionContent {
pub name: String,
pub arguments: String,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub thought_signature: Option<String>,
}
#[derive(Deserialize, Debug)]
@@ -396,6 +398,7 @@ pub struct ToolCallChunk {
pub struct FunctionChunk {
pub name: Option<String>,
pub arguments: Option<String>,
+ pub thought_signature: Option<String>,
}
#[derive(Deserialize)]
@@ -127,6 +127,8 @@ pub enum ResponseInputItem {
arguments: String,
#[serde(skip_serializing_if = "Option::is_none")]
status: Option<ItemStatus>,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ thought_signature: Option<String>,
},
FunctionCallOutput {
call_id: String,
@@ -251,6 +253,8 @@ pub enum ResponseOutputItem {
arguments: String,
#[serde(skip_serializing_if = "Option::is_none")]
status: Option<ItemStatus>,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ thought_signature: Option<String>,
},
Reasoning {
id: String,
@@ -553,6 +553,7 @@ impl ExampleInstance {
role: Role::User,
content: vec![MessageContent::Text(to_prompt(assertion.description))],
cache: false,
+ reasoning_details: None,
}],
temperature: None,
tools: Vec::new(),
@@ -1255,7 +1256,8 @@ pub fn response_events_to_markdown(
| LanguageModelCompletionEvent::StartMessage { .. }
| LanguageModelCompletionEvent::UsageUpdated { .. }
| LanguageModelCompletionEvent::Queued { .. }
- | LanguageModelCompletionEvent::Started,
+ | LanguageModelCompletionEvent::Started
+ | LanguageModelCompletionEvent::ReasoningDetails(_),
) => {}
Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
json_parse_error, ..
@@ -1341,6 +1343,7 @@ impl ThreadDialog {
Ok(LanguageModelCompletionEvent::UsageUpdate(_))
| Ok(LanguageModelCompletionEvent::RedactedThinking { .. })
| Ok(LanguageModelCompletionEvent::StartMessage { .. })
+ | Ok(LanguageModelCompletionEvent::ReasoningDetails(_))
| Ok(LanguageModelCompletionEvent::Stop(_))
| Ok(LanguageModelCompletionEvent::Queued { .. })
| Ok(LanguageModelCompletionEvent::Started)
@@ -1372,6 +1375,7 @@ impl ThreadDialog {
role: Role::Assistant,
content,
cache: false,
+ reasoning_details: None,
})
} else {
None
@@ -2051,6 +2051,7 @@ impl GitPanel {
role: Role::User,
content: vec![content.into()],
cache: false,
+ reasoning_details: None,
}],
tools: Vec::new(),
tool_choice: None,
@@ -98,6 +98,7 @@ pub enum LanguageModelCompletionEvent {
StartMessage {
message_id: String,
},
+ ReasoningDetails(serde_json::Value),
UsageUpdate(TokenUsage),
}
@@ -680,6 +681,7 @@ pub trait LanguageModel: Send + Sync {
Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
Ok(LanguageModelCompletionEvent::RedactedThinking { .. }) => None,
+ Ok(LanguageModelCompletionEvent::ReasoningDetails(_)) => None,
Ok(LanguageModelCompletionEvent::Stop(_)) => None,
Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
@@ -1034,8 +1036,8 @@ mod tests {
let original = LanguageModelToolUse {
id: LanguageModelToolUseId::from("no_sig_id"),
name: "no_sig_tool".into(),
- raw_input: json!({"key": "value"}).to_string(),
- input: json!({"key": "value"}),
+ raw_input: json!({"arg": "value"}).to_string(),
+ input: json!({"arg": "value"}),
is_input_complete: true,
thought_signature: None,
};
@@ -357,6 +357,8 @@ pub struct LanguageModelRequestMessage {
pub role: Role,
pub content: Vec<MessageContent>,
pub cache: bool,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub reasoning_details: Option<serde_json::Value>,
}
impl LanguageModelRequestMessage {
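
Because the new field is annotated with skip_serializing_if, a None value never appears on the wire, so serialized requests look exactly as they did before this change. A hedged round-trip sketch, assuming the struct derives Serialize as its serde attributes imply:

let message = LanguageModelRequestMessage {
    role: Role::User,
    content: vec!["hello".into()],
    cache: false,
    reasoning_details: None,
};
// `reasoning_details: None` is omitted entirely from the JSON,
// so providers that predate the field see no new key.
assert!(!serde_json::to_string(&message).unwrap().contains("reasoning_details"));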
@@ -989,6 +989,7 @@ mod tests {
MessageContent::Image(language_model::LanguageModelImage::empty()),
],
cache: true,
+ reasoning_details: None,
}],
thread_id: None,
prompt_id: None,
@@ -361,6 +361,7 @@ pub fn map_to_language_model_completion_events(
id: String,
name: String,
arguments: String,
+ thought_signature: Option<String>,
}
struct State {
@@ -418,6 +419,11 @@ pub fn map_to_language_model_completion_events(
if let Some(arguments) = function.arguments.clone() {
entry.arguments.push_str(&arguments);
}
+
+ if let Some(thought_signature) = function.thought_signature.clone()
+ {
+ entry.thought_signature = Some(thought_signature);
+ }
}
}
@@ -458,7 +464,7 @@ pub fn map_to_language_model_completion_events(
is_input_complete: true,
input,
raw_input: tool_call.arguments,
- thought_signature: None,
+ thought_signature: tool_call.thought_signature,
},
)),
Err(error) => Ok(
@@ -550,6 +556,7 @@ impl CopilotResponsesEventMapper {
call_id,
name,
arguments,
+ thought_signature,
..
} => {
let mut events = Vec::new();
@@ -561,7 +568,7 @@ impl CopilotResponsesEventMapper {
is_input_complete: true,
input,
raw_input: arguments.clone(),
- thought_signature: None,
+ thought_signature,
},
))),
Err(error) => {
@@ -776,6 +783,7 @@ fn into_copilot_chat(
function: copilot::copilot_chat::FunctionContent {
name: tool_use.name.to_string(),
arguments: serde_json::to_string(&tool_use.input)?,
+ thought_signature: tool_use.thought_signature.clone(),
},
},
});
@@ -950,6 +958,7 @@ fn into_copilot_responses(
name: tool_use.name.to_string(),
arguments: tool_use.raw_input.clone(),
status: None,
+ thought_signature: tool_use.thought_signature.clone(),
});
}
}
@@ -1122,6 +1131,7 @@ mod tests {
name: "do_it".into(),
arguments: "{\"x\":1}".into(),
status: None,
+ thought_signature: None,
},
}];
@@ -1147,6 +1157,7 @@ mod tests {
name: "do_it".into(),
arguments: "{not json}".into(),
status: None,
+ thought_signature: None,
},
}];
@@ -1250,6 +1261,7 @@ mod tests {
name: "do_it".into(),
arguments: "{}".into(),
status: None,
+ thought_signature: None,
},
},
responses::StreamEvent::Completed {
@@ -1094,6 +1094,7 @@ mod tests {
role: Role::Assistant,
content: vec![MessageContent::ToolUse(tool_use)],
cache: false,
+ reasoning_details: None,
}],
..Default::default()
},
@@ -1130,6 +1131,7 @@ mod tests {
role: Role::Assistant,
content: vec![MessageContent::ToolUse(tool_use)],
cache: false,
+ reasoning_details: None,
}],
..Default::default()
},
@@ -1162,6 +1164,7 @@ mod tests {
role: Role::Assistant,
content: vec![MessageContent::ToolUse(tool_use)],
cache: false,
+ reasoning_details: None,
}],
..Default::default()
},
@@ -1218,6 +1221,7 @@ mod tests {
role: Role::Assistant,
content: vec![MessageContent::ToolUse(tool_use)],
cache: false,
+ reasoning_details: None,
}],
..Default::default()
},
@@ -1025,11 +1025,13 @@ mod tests {
role: Role::System,
content: vec![MessageContent::Text("System prompt".into())],
cache: false,
+ reasoning_details: None,
},
LanguageModelRequestMessage {
role: Role::User,
content: vec![MessageContent::Text("Hello".into())],
cache: false,
+ reasoning_details: None,
},
],
temperature: Some(0.5),
@@ -1064,6 +1066,7 @@ mod tests {
}),
],
cache: false,
+ reasoning_details: None,
}],
tools: vec![],
tool_choice: None,
@@ -882,6 +882,7 @@ mod tests {
role: Role::User,
content: vec![MessageContent::Text("message".into())],
cache: false,
+ reasoning_details: None,
}],
tools: vec![],
tool_choice: None,
@@ -393,6 +393,7 @@ pub fn into_open_router(
) -> open_router::Request {
let mut messages = Vec::new();
for message in request.messages {
+ let reasoning_details = message.reasoning_details.clone();
for content in message.content {
match content {
MessageContent::Text(text) => add_message_content_part(
@@ -419,18 +420,26 @@ pub fn into_open_router(
name: tool_use.name.to_string(),
arguments: serde_json::to_string(&tool_use.input)
.unwrap_or_default(),
+ thought_signature: tool_use.thought_signature.clone(),
},
},
};
- if let Some(open_router::RequestMessage::Assistant { tool_calls, .. }) =
- messages.last_mut()
+ if let Some(open_router::RequestMessage::Assistant {
+ tool_calls,
+ reasoning_details: existing_reasoning,
+ ..
+ }) = messages.last_mut()
{
tool_calls.push(tool_call);
+ if existing_reasoning.is_none() && reasoning_details.is_some() {
+ *existing_reasoning = reasoning_details.clone();
+ }
} else {
messages.push(open_router::RequestMessage::Assistant {
content: None,
tool_calls: vec![tool_call],
+ reasoning_details: reasoning_details.clone(),
});
}
}
@@ -529,6 +538,7 @@ fn add_message_content_part(
Role::Assistant => open_router::RequestMessage::Assistant {
content: Some(open_router::MessageContent::from(vec![new_part])),
tool_calls: Vec::new(),
+ reasoning_details: None,
},
Role::System => open_router::RequestMessage::System {
content: open_router::MessageContent::from(vec![new_part]),
@@ -540,12 +550,14 @@ fn add_message_content_part(
pub struct OpenRouterEventMapper {
tool_calls_by_index: HashMap<usize, RawToolCall>,
+ reasoning_details: Option<serde_json::Value>,
}
impl OpenRouterEventMapper {
pub fn new() -> Self {
Self {
tool_calls_by_index: HashMap::default(),
+ reasoning_details: None,
}
}
@@ -577,6 +589,15 @@ impl OpenRouterEventMapper {
};
let mut events = Vec::new();
+
+ if let Some(details) = choice.delta.reasoning_details.clone() {
+ // Emit reasoning_details immediately
+ events.push(Ok(LanguageModelCompletionEvent::ReasoningDetails(
+ details.clone(),
+ )));
+ self.reasoning_details = Some(details);
+ }
+
if let Some(reasoning) = choice.delta.reasoning.clone() {
events.push(Ok(LanguageModelCompletionEvent::Thinking {
text: reasoning,
@@ -608,6 +629,10 @@ impl OpenRouterEventMapper {
if let Some(arguments) = function.arguments.clone() {
entry.arguments.push_str(&arguments);
}
+
+ if let Some(signature) = function.thought_signature.clone() {
+ entry.thought_signature = Some(signature);
+ }
}
}
}
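
Note the asymmetry in how chunks accumulate: arguments fragments are appended across deltas, while thought_signature is replaced wholesale, so the last chunk carrying a signature wins. A toy illustration with invented chunk values:

let mut entry = RawToolCall {
    id: "tool_call_abc123".into(),
    name: "list_directory".into(),
    arguments: String::new(),
    thought_signature: None,
};
for (fragment, signature) in [("{\"path\":", None), ("\"test\"}", Some("sha256:abc"))] {
    entry.arguments.push_str(fragment); // appended
    if let Some(signature) = signature {
        entry.thought_signature = Some(signature.to_string()); // replaced
    }
}
assert_eq!(entry.arguments, "{\"path\":\"test\"}");
assert_eq!(entry.thought_signature.as_deref(), Some("sha256:abc"));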
@@ -623,6 +648,7 @@ impl OpenRouterEventMapper {
match choice.finish_reason.as_deref() {
Some("stop") => {
+ // Don't emit reasoning_details here - already emitted immediately when captured
events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
}
Some("tool_calls") => {
@@ -635,7 +661,7 @@ impl OpenRouterEventMapper {
is_input_complete: true,
input,
raw_input: tool_call.arguments.clone(),
- thought_signature: None,
+ thought_signature: tool_call.thought_signature.clone(),
},
)),
Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
@@ -647,10 +673,12 @@ impl OpenRouterEventMapper {
}
}));
+ // Don't emit reasoning_details here - already emitted immediately when captured
events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
}
Some(stop_reason) => {
log::error!("Unexpected OpenRouter stop_reason: {stop_reason:?}",);
+ // Don't emit reasoning_details here - already emitted immediately when captured
events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
}
None => {}
@@ -665,6 +693,7 @@ struct RawToolCall {
id: String,
name: String,
arguments: String,
+ thought_signature: Option<String>,
}
pub fn count_open_router_tokens(
@@ -832,3 +861,235 @@ impl Render for ConfigurationView {
}
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ use open_router::{ChoiceDelta, FunctionChunk, ResponseMessageDelta, ToolCallChunk};
+
+ #[gpui::test]
+ async fn test_reasoning_details_preservation_with_tool_calls() {
+ // This test verifies that reasoning_details are properly captured and preserved
+ // when a model uses tool calling with reasoning/thinking tokens.
+ //
+ // The key regression this prevents:
+ // - OpenRouter sends multiple reasoning_details updates during streaming
+ // - First with actual content (encrypted reasoning data)
+ // - Then with empty array on completion
+ // - We must NOT overwrite the real data with the empty array
+
+ let mut mapper = OpenRouterEventMapper::new();
+
+ // Simulate the streaming events as they come from OpenRouter/Gemini
+ let events = vec![
+ // Event 1: Initial reasoning details with text
+ ResponseStreamEvent {
+ id: Some("response_123".into()),
+ created: 1234567890,
+ model: "google/gemini-3-pro-preview".into(),
+ choices: vec![ChoiceDelta {
+ index: 0,
+ delta: ResponseMessageDelta {
+ role: None,
+ content: None,
+ reasoning: None,
+ tool_calls: None,
+ reasoning_details: Some(serde_json::json!([
+ {
+ "type": "reasoning.text",
+ "text": "Let me analyze this request...",
+ "format": "google-gemini-v1",
+ "index": 0
+ }
+ ])),
+ },
+ finish_reason: None,
+ }],
+ usage: None,
+ },
+ // Event 2: More reasoning details
+ ResponseStreamEvent {
+ id: Some("response_123".into()),
+ created: 1234567890,
+ model: "google/gemini-3-pro-preview".into(),
+ choices: vec![ChoiceDelta {
+ index: 0,
+ delta: ResponseMessageDelta {
+ role: None,
+ content: None,
+ reasoning: None,
+ tool_calls: None,
+ reasoning_details: Some(serde_json::json!([
+ {
+ "type": "reasoning.encrypted",
+ "data": "EtgDCtUDAdHtim9OF5jm4aeZSBAtl/randomized123",
+ "format": "google-gemini-v1",
+ "index": 0,
+ "id": "tool_call_abc123"
+ }
+ ])),
+ },
+ finish_reason: None,
+ }],
+ usage: None,
+ },
+ // Event 3: Tool call starts
+ ResponseStreamEvent {
+ id: Some("response_123".into()),
+ created: 1234567890,
+ model: "google/gemini-3-pro-preview".into(),
+ choices: vec![ChoiceDelta {
+ index: 0,
+ delta: ResponseMessageDelta {
+ role: None,
+ content: None,
+ reasoning: None,
+ tool_calls: Some(vec![ToolCallChunk {
+ index: 0,
+ id: Some("tool_call_abc123".into()),
+ function: Some(FunctionChunk {
+ name: Some("list_directory".into()),
+ arguments: Some("{\"path\":\"test\"}".into()),
+ thought_signature: Some("sha256:test_signature_xyz789".into()),
+ }),
+ }]),
+ reasoning_details: None,
+ },
+ finish_reason: None,
+ }],
+ usage: None,
+ },
+ // Event 4: Empty reasoning_details on tool_calls finish
+ // This is the critical event - we must not overwrite with this empty array!
+ ResponseStreamEvent {
+ id: Some("response_123".into()),
+ created: 1234567890,
+ model: "google/gemini-3-pro-preview".into(),
+ choices: vec![ChoiceDelta {
+ index: 0,
+ delta: ResponseMessageDelta {
+ role: None,
+ content: None,
+ reasoning: None,
+ tool_calls: None,
+ reasoning_details: Some(serde_json::json!([])),
+ },
+ finish_reason: Some("tool_calls".into()),
+ }],
+ usage: None,
+ },
+ ];
+
+ // Process all events
+ let mut collected_events = Vec::new();
+ for event in events {
+ let mapped = mapper.map_event(event);
+ collected_events.extend(mapped);
+ }
+
+ // Verify we got the expected events
+ let mut has_tool_use = false;
+ let mut reasoning_details_events = Vec::new();
+ let mut thought_signature_value = None;
+
+ for event_result in collected_events {
+ match event_result {
+ Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => {
+ has_tool_use = true;
+ assert_eq!(tool_use.id.to_string(), "tool_call_abc123");
+ assert_eq!(tool_use.name.as_ref(), "list_directory");
+ thought_signature_value = tool_use.thought_signature.clone();
+ }
+ Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => {
+ reasoning_details_events.push(details);
+ }
+ _ => {}
+ }
+ }
+
+ // Assertions
+ assert!(has_tool_use, "Should have emitted ToolUse event");
+ assert!(
+ !reasoning_details_events.is_empty(),
+ "Should have emitted ReasoningDetails events"
+ );
+
+ // We should have received multiple reasoning_details events (text, encrypted, empty)
+ // The agent layer is responsible for discarding the empty trailing array
+ // and keeping the last non-empty payload
+ assert!(
+ reasoning_details_events.len() >= 2,
+ "Should have multiple reasoning_details events from streaming"
+ );
+
+ // Verify at least one contains the encrypted data
+ let has_encrypted = reasoning_details_events.iter().any(|details| {
+ if let serde_json::Value::Array(arr) = details {
+ arr.iter().any(|item| {
+ item["type"] == "reasoning.encrypted"
+ && item["data"]
+ .as_str()
+ .is_some_and(|s| s.contains("EtgDCtUDAdHtim9OF5jm4aeZSBAtl"))
+ })
+ } else {
+ false
+ }
+ });
+ assert!(
+ has_encrypted,
+ "Should have at least one reasoning_details with encrypted data"
+ );
+
+ // Verify thought_signature was captured
+ assert!(
+ thought_signature_value.is_some(),
+ "Tool use should have thought_signature"
+ );
+ assert_eq!(
+ thought_signature_value.unwrap(),
+ "sha256:test_signature_xyz789"
+ );
+ }
+
+ #[gpui::test]
+ async fn test_agent_prevents_empty_reasoning_details_overwrite() {
+ // This test verifies that the agent layer prevents empty reasoning_details
+ // from overwriting non-empty ones, even though the mapper emits all events.
+
+ // Simulate what the agent does when it receives multiple ReasoningDetails events
+ let mut agent_reasoning_details: Option<serde_json::Value> = None;
+
+ let events = vec![
+ // First event: non-empty reasoning_details
+ serde_json::json!([
+ {
+ "type": "reasoning.encrypted",
+ "data": "real_data_here",
+ "format": "google-gemini-v1"
+ }
+ ]),
+ // Second event: empty array (should not overwrite)
+ serde_json::json!([]),
+ ];
+
+ for details in events {
+ // This mimics the agent's logic: empty arrays are ignored, anything else
+ // overwrites the previously stored value
+ match &details {
+ serde_json::Value::Array(arr) if arr.is_empty() => {}
+ _ => agent_reasoning_details = Some(details),
+ }
+ }
+
+ // Verify the agent kept the non-empty reasoning_details
+ assert!(agent_reasoning_details.is_some());
+ let final_details = agent_reasoning_details.unwrap();
+ if let serde_json::Value::Array(arr) = &final_details {
+ assert!(
+ !arr.is_empty(),
+ "Agent should have kept the non-empty reasoning_details"
+ );
+ assert_eq!(arr[0]["data"], "real_data_here");
+ } else {
+ panic!("Expected array");
+ }
+ }
+}
@@ -215,6 +215,8 @@ pub enum RequestMessage {
content: Option<MessageContent>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
tool_calls: Vec<ToolCall>,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ reasoning_details: Option<serde_json::Value>,
},
User {
content: MessageContent,
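
For illustration, this is roughly the assistant turn that into_open_router (earlier in this change) now assembles when echoing captured reasoning back alongside a tool call; the payload values are invented:

let message = RequestMessage::Assistant {
    content: None,
    tool_calls: vec![tool_call], // each ToolCall carries its thought_signature
    reasoning_details: Some(serde_json::json!([{
        "type": "reasoning.encrypted",
        "data": "opaque-provider-blob",
        "format": "google-gemini-v1"
    }])),
};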
@@ -341,6 +343,8 @@ pub enum ToolCallContent {
pub struct FunctionContent {
pub name: String,
pub arguments: String,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub thought_signature: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
@@ -350,6 +354,8 @@ pub struct ResponseMessageDelta {
pub reasoning: Option<String>,
#[serde(default, skip_serializing_if = "is_none_or_empty")]
pub tool_calls: Option<Vec<ToolCallChunk>>,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub reasoning_details: Option<serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
@@ -363,6 +369,8 @@ pub struct ToolCallChunk {
pub struct FunctionChunk {
pub name: Option<String>,
pub arguments: Option<String>,
+ #[serde(default)]
+ pub thought_signature: Option<String>,
}
#[derive(Serialize, Deserialize, Debug)]
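
A hedged sketch of how a streamed delta carrying the new field deserializes, assuming ResponseMessageDelta derives Deserialize as the surrounding wire types do; the JSON shape follows the test fixture earlier in this change, and the remaining Option fields default to None when absent:

let delta: ResponseMessageDelta = serde_json::from_str(
    r#"{"reasoning_details": [{"type": "reasoning.text", "text": "thinking...", "format": "google-gemini-v1", "index": 0}]}"#,
)
.unwrap();
assert!(delta.reasoning_details.is_some());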
@@ -1072,6 +1072,7 @@ impl RulesLibrary {
role: Role::System,
content: vec![body.to_string().into()],
cache: false,
+ reasoning_details: None,
}],
tools: Vec::new(),
tool_choice: None,