@@ -234,7 +234,9 @@ pub struct LanguageModelToolResult {
pub tool_use_id: LanguageModelToolUseId,
pub tool_name: Arc<str>,
pub is_error: bool,
+ /// The tool output, formatted for presentation to the model
pub content: LanguageModelToolResultContent,
+ /// The raw tool output, if available; typically used for debugging or to carry extra state for replay
pub output: Option<serde_json::Value>,
}
@@ -1111,38 +1111,26 @@ fn into_copilot_responses(
Role::User => {
for content in &message.content {
if let MessageContent::ToolResult(tool_result) = content {
- let output = if let Some(out) = &tool_result.output {
- match out {
- serde_json::Value::String(s) => {
- responses::ResponseFunctionOutput::Text(s.clone())
- }
- serde_json::Value::Null => {
- responses::ResponseFunctionOutput::Text(String::new())
- }
- other => responses::ResponseFunctionOutput::Text(other.to_string()),
+ let output = match &tool_result.content {
+ LanguageModelToolResultContent::Text(text) => {
+ responses::ResponseFunctionOutput::Text(text.to_string())
}
- } else {
- match &tool_result.content {
- LanguageModelToolResultContent::Text(text) => {
- responses::ResponseFunctionOutput::Text(text.to_string())
- }
- LanguageModelToolResultContent::Image(image) => {
- if model.supports_vision() {
- responses::ResponseFunctionOutput::Content(vec![
- responses::ResponseInputContent::InputImage {
- image_url: Some(image.to_base64_url()),
- detail: Default::default(),
- },
- ])
- } else {
- debug_panic!(
- "This should be caught at {} level",
- tool_result.tool_name
- );
- responses::ResponseFunctionOutput::Text(
+ LanguageModelToolResultContent::Image(image) => {
+ if model.supports_vision() {
+ responses::ResponseFunctionOutput::Content(vec![
+ responses::ResponseInputContent::InputImage {
+ image_url: Some(image.to_base64_url()),
+ detail: Default::default(),
+ },
+ ])
+ } else {
+ debug_panic!(
+ "This should be caught at {} level",
+ tool_result.tool_name
+ );
+ responses::ResponseFunctionOutput::Text(
"[Tool responded with an image, but this model does not support vision]".into(),
)
- }
}
}
};
@@ -9,9 +9,8 @@ use language_model::{
LanguageModelCompletionEvent, LanguageModelId, LanguageModelImage, LanguageModelName,
LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
- LanguageModelToolChoice, LanguageModelToolResult, LanguageModelToolResultContent,
- LanguageModelToolUse, LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason,
- TokenUsage, env_var,
+ LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse,
+ LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason, TokenUsage, env_var,
};
use menu;
use open_ai::responses::{
@@ -647,7 +646,10 @@ fn append_message_to_response_items(
input_items.push(ResponseInputItem::FunctionCallOutput(
ResponseFunctionCallOutputItem {
call_id: tool_result.tool_use_id.to_string(),
- output: tool_result_output(&tool_result),
+ output: match tool_result.content {
+ LanguageModelToolResultContent::Text(text) => text.to_string(),
+ LanguageModelToolResultContent::Image(image) => image.to_base64_url(),
+ },
},
));
}
@@ -715,21 +717,6 @@ fn flush_response_parts(
parts.clear();
}
-fn tool_result_output(result: &LanguageModelToolResult) -> String {
- if let Some(output) = &result.output {
- match output {
- serde_json::Value::String(text) => text.clone(),
- serde_json::Value::Null => String::new(),
- _ => output.to_string(),
- }
- } else {
- match &result.content {
- LanguageModelToolResultContent::Text(text) => text.to_string(),
- LanguageModelToolResultContent::Image(image) => image.to_base64_url(),
- }
- }
-}
-
fn add_message_content_part(
new_part: open_ai::MessagePart,
role: Role,
@@ -1446,7 +1433,9 @@ impl Render for ConfigurationView {
mod tests {
use futures::{StreamExt, executor::block_on};
use gpui::TestAppContext;
- use language_model::{LanguageModelRequestMessage, LanguageModelRequestTool};
+ use language_model::{
+ LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
+ };
use open_ai::responses::{
ReasoningSummaryPart, ResponseFunctionToolCall, ResponseOutputItem, ResponseOutputMessage,
ResponseReasoningItem, ResponseStatusDetails, ResponseSummary, ResponseUsage,
@@ -1692,7 +1681,7 @@ mod tests {
{
"type": "function_call_output",
"call_id": "call-42",
- "output": "{\"forecast\":\"Sunny\"}"
+ "output": "Sunny"
}
],
"stream": true,