diff --git a/crates/deepseek/src/deepseek.rs b/crates/deepseek/src/deepseek.rs
index 64a1cbe5d96354260c2bf84a43ed70be7336aa7a..e978aa08048bfa4c7b7b203ce6b405ba8a0a7d0c 100644
--- a/crates/deepseek/src/deepseek.rs
+++ b/crates/deepseek/src/deepseek.rs
@@ -155,6 +155,8 @@ pub enum RequestMessage {
         content: Option<String>,
         #[serde(default, skip_serializing_if = "Vec::is_empty")]
         tool_calls: Vec<ToolCall>,
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        reasoning_content: Option<String>,
     },
     User {
         content: String,
diff --git a/crates/language_models/src/provider/deepseek.rs b/crates/language_models/src/provider/deepseek.rs
index 4bc7164f421bfbaa075c72faff7f731c0defcdba..91b83bb9f1d0f08fe70f5e750ff8ce993a7afd7f 100644
--- a/crates/language_models/src/provider/deepseek.rs
+++ b/crates/language_models/src/provider/deepseek.rs
@@ -332,9 +332,11 @@ pub fn into_deepseek(
     model: &deepseek::Model,
     max_output_tokens: Option<u64>,
 ) -> deepseek::Request {
-    let is_reasoner = *model == deepseek::Model::Reasoner;
+    let is_reasoner = model == &deepseek::Model::Reasoner;
     let mut messages = Vec::new();
 
+    let mut current_reasoning: Option<String> = None;
+
     for message in request.messages {
         for content in message.content {
             match content {
@@ -343,10 +345,14 @@
                     Role::Assistant => deepseek::RequestMessage::Assistant {
                         content: Some(text),
                         tool_calls: Vec::new(),
+                        reasoning_content: current_reasoning.take(),
                     },
                     Role::System => deepseek::RequestMessage::System { content: text },
                 }),
-                MessageContent::Thinking { .. } => {}
+                MessageContent::Thinking { text, .. } => {
+                    // Accumulate reasoning content for next assistant message
+                    current_reasoning.get_or_insert_default().push_str(&text);
+                }
                 MessageContent::RedactedThinking(_) => {}
                 MessageContent::Image(_) => {}
                 MessageContent::ToolUse(tool_use) => {
@@ -369,6 +375,7 @@
                         messages.push(deepseek::RequestMessage::Assistant {
                             content: None,
                             tool_calls: vec![tool_call],
+                            reasoning_content: current_reasoning.take(),
                         });
                     }
                 }
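
Note (not part of the patch): a minimal standalone sketch of the accumulate-then-take pattern the second hunk relies on. Thinking text is appended into an Option<String> buffer and handed off to exactly one outgoing assistant message via take(). Only the pattern is shown; the surrounding names are illustrative and do not come from the Zed codebase.

// Illustrative only; mirrors how `current_reasoning` is filled from
// MessageContent::Thinking and drained into the next Assistant message.
fn main() {
    let mut current_reasoning: Option<String> = None;

    // Each Thinking chunk appends to the buffer, creating it on first use.
    current_reasoning.get_or_insert_default().push_str("considered option A; ");
    current_reasoning.get_or_insert_default().push_str("chose option B");

    // take() moves the accumulated text out and leaves None behind, so the
    // reasoning is attached to exactly one message and never duplicated.
    let attached = current_reasoning.take();
    assert_eq!(attached.as_deref(), Some("considered option A; chose option B"));
    assert!(current_reasoning.is_none());
}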