use crate::{prompts::BasePrompt, templates::Templates};
use agent_client_protocol as acp;
use anyhow::{anyhow, Result};
use cloud_llm_client::{CompletionIntent, CompletionMode};
use collections::HashMap;
use futures::{channel::mpsc, stream::FuturesUnordered};
use gpui::{App, Context, Entity, ImageFormat, SharedString, Task};
use language_model::{
    LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelImage,
    LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
    LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
    LanguageModelToolUse, LanguageModelToolUseId, MessageContent, Role, StopReason,
};
use log;
use project::Project;
use schemars::{JsonSchema, Schema};
use serde::Deserialize;
use smol::stream::StreamExt;
use std::{collections::BTreeMap, fmt::Write, sync::Arc};
use util::{markdown::MarkdownCodeBlock, ResultExt};

#[derive(Debug, Clone)]
pub struct AgentMessage {
    pub role: Role,
    pub content: Vec<MessageContent>,
}

impl AgentMessage {
    pub fn to_markdown(&self) -> String {
        let mut markdown = format!("## {}\n", self.role);

        for content in &self.content {
            match content {
                MessageContent::Text(text) => {
                    markdown.push_str(text);
                    markdown.push('\n');
                }
                MessageContent::Thinking { text, .. } => {
                    markdown.push_str("<think>");
                    markdown.push_str(text);
                    markdown.push_str("</think>\n");
                }
                MessageContent::RedactedThinking(_) => markdown.push_str("<redacted_thinking />\n"),
                MessageContent::Image(_) => {
                    markdown.push_str("<image />\n");
                }
                MessageContent::ToolUse(tool_use) => {
                    markdown.push_str(&format!(
                        "**Tool Use**: {} (ID: {})\n",
                        tool_use.name, tool_use.id
                    ));
                    markdown.push_str(&format!(
                        "{}\n",
                        MarkdownCodeBlock {
                            tag: "json",
                            text: &format!("{:#}", tool_use.input)
                        }
                    ));
                }
                MessageContent::ToolResult(tool_result) => {
                    markdown.push_str(&format!(
                        "**Tool Result**: {} (ID: {})\n\n",
                        tool_result.tool_name, tool_result.tool_use_id
                    ));
                    if tool_result.is_error {
                        markdown.push_str("**ERROR:**\n");
                    }

                    match &tool_result.content {
                        LanguageModelToolResultContent::Text(text) => {
                            writeln!(markdown, "{text}\n").ok();
                        }
                        LanguageModelToolResultContent::Image(_) => {
                            writeln!(markdown, "<image />\n").ok();
                        }
                    }

                    if let Some(output) = tool_result.output.as_ref() {
                        writeln!(
                            markdown,
                            "**Debug Output**:\n\n```json\n{}\n```\n",
                            serde_json::to_string_pretty(output).unwrap()
                        )
                        .unwrap();
                    }
                }
            }
        }

        markdown
    }
}

#[derive(Debug)]
pub enum AgentResponseEvent {
    Text(String),
    Thinking(String),
    ToolCall(acp::ToolCall),
    ToolCallUpdate(acp::ToolCallUpdate),
    Stop(acp::StopReason),
}

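/// A system-prompt fragment that can be rendered against the shared [`Templates`].
///
/// A minimal sketch of an implementor (the `StaticPrompt` name here is hypothetical,
/// not part of this crate):
///
/// ```ignore
/// struct StaticPrompt(String);
///
/// impl Prompt for StaticPrompt {
///     fn render(&self, _templates: &Templates, _cx: &App) -> Result<String> {
///         // Ignore the templates and return the fixed prompt text.
///         Ok(self.0.clone())
///     }
/// }
/// ```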
pub trait Prompt {
    fn render(&self, prompts: &Templates, cx: &App) -> Result<String>;
}

pub struct Thread {
    messages: Vec<AgentMessage>,
    completion_mode: CompletionMode,
    /// Holds the task that handles agent interaction until the end of the turn.
    /// Survives across multiple requests as the model performs tool calls and
    /// we run tools and report their results.
    running_turn: Option<Task<()>>,
    pending_tool_uses: HashMap<LanguageModelToolUseId, LanguageModelToolUse>,
    system_prompts: Vec<Arc<dyn Prompt>>,
    tools: BTreeMap<SharedString, Arc<dyn AnyAgentTool>>,
    templates: Arc<Templates>,
    pub selected_model: Arc<dyn LanguageModel>,
    // action_log: Entity<ActionLog>,
}

impl Thread {
    pub fn new(
        project: Entity<Project>,
        templates: Arc<Templates>,
        default_model: Arc<dyn LanguageModel>,
    ) -> Self {
        Self {
            messages: Vec::new(),
            completion_mode: CompletionMode::Normal,
            system_prompts: vec![Arc::new(BasePrompt::new(project))],
            running_turn: None,
            pending_tool_uses: HashMap::default(),
            tools: BTreeMap::default(),
            templates,
            selected_model: default_model,
        }
    }

    pub fn set_mode(&mut self, mode: CompletionMode) {
        self.completion_mode = mode;
    }

    pub fn messages(&self) -> &[AgentMessage] {
        &self.messages
    }

    pub fn add_tool(&mut self, tool: impl AgentTool) {
        self.tools.insert(tool.name(), tool.erase());
    }

    pub fn remove_tool(&mut self, name: &str) -> bool {
        self.tools.remove(name).is_some()
    }

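    /// Cancels the in-flight turn, if any, and records an error tool result for
    /// every pending tool use so the conversation remains well-formed for the
    /// next request.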
    pub fn cancel(&mut self) {
        self.running_turn.take();

        let tool_results = self
            .pending_tool_uses
            .drain()
            .map(|(tool_use_id, tool_use)| {
                MessageContent::ToolResult(LanguageModelToolResult {
                    tool_use_id,
                    tool_name: tool_use.name.clone(),
                    is_error: true,
                    content: LanguageModelToolResultContent::Text("Tool canceled by user".into()),
                    output: None,
                })
            })
            .collect::<Vec<_>>();
        self.last_user_message().content.extend(tool_results);
    }

    /// Sending a message results in the model streaming a response, which could include tool calls.
    /// After calling tools, the model stops and waits for any outstanding tool calls to be completed and their results sent.
    /// The returned channel reports every stop the model makes before erroring or ending its turn.
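    ///
    /// A minimal sketch of driving a turn from a caller (assumes an `Entity<Thread>`
    /// named `thread`, a selected `model`, and a context `cx` inside an async task;
    /// not a complete listing):
    ///
    /// ```ignore
    /// let mut events = thread.update(cx, |thread, cx| {
    ///     thread.send(model.clone(), MessageContent::Text("Hello!".into()), cx)
    /// });
    /// while let Some(event) = events.next().await {
    ///     match event {
    ///         Ok(AgentResponseEvent::Text(chunk)) => print!("{chunk}"),
    ///         Ok(AgentResponseEvent::Stop(_)) => break,
    ///         Ok(_) => {}
    ///         Err(error) => eprintln!("completion error: {error}"),
    ///     }
    /// }
    /// ```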
    pub fn send(
        &mut self,
        model: Arc<dyn LanguageModel>,
        content: impl Into<MessageContent>,
        cx: &mut Context<Self>,
    ) -> mpsc::UnboundedReceiver<Result<AgentResponseEvent, LanguageModelCompletionError>> {
        let content = content.into();
        log::info!("Thread::send called with model: {:?}", model.name());
        log::debug!("Thread::send content: {:?}", content);

        cx.notify();
        let (events_tx, events_rx) =
            mpsc::unbounded::<Result<AgentResponseEvent, LanguageModelCompletionError>>();

        let user_message_ix = self.messages.len();
        self.messages.push(AgentMessage {
            role: Role::User,
            content: vec![content],
        });
        log::info!("Total messages in thread: {}", self.messages.len());
        self.running_turn = Some(cx.spawn(async move |thread, cx| {
            log::info!("Starting agent turn execution");
            let turn_result = async {
                // Perform one request, then keep looping if the model makes tool calls.
                let mut completion_intent = CompletionIntent::UserPrompt;
                'outer: loop {
                    log::debug!(
                        "Building completion request with intent: {:?}",
                        completion_intent
                    );
                    let request = thread.update(cx, |thread, cx| {
                        thread.build_completion_request(completion_intent, cx)
                    })?;

                    // println!(
                    // "request: {}",
                    // serde_json::to_string_pretty(&request).unwrap()
                    // );

                    // Stream events, appending to messages and collecting up tool uses.
                    log::info!("Calling model.stream_completion");
                    let mut events = model.stream_completion(request, cx).await?;
                    log::debug!("Stream completion started successfully");
                    let mut tool_uses = FuturesUnordered::new();
                    while let Some(event) = events.next().await {
                        match event {
                            Ok(LanguageModelCompletionEvent::Stop(reason)) => {
                                if let Some(reason) = to_acp_stop_reason(reason) {
                                    events_tx
                                        .unbounded_send(Ok(AgentResponseEvent::Stop(reason)))
                                        .ok();
                                }

                                if reason == StopReason::Refusal {
                                    thread.update(cx, |thread, _cx| {
                                        thread.messages.truncate(user_message_ix);
                                    })?;
                                    break 'outer;
                                }
                            }
                            Ok(event) => {
                                log::trace!("Received completion event: {:?}", event);
                                thread
                                    .update(cx, |thread, cx| {
                                        tool_uses.extend(thread.handle_streamed_completion_event(
                                            event, &events_tx, cx,
                                        ));
                                    })
                                    .ok();
                            }
                            Err(error) => {
                                log::error!("Error in completion stream: {:?}", error);
                                events_tx.unbounded_send(Err(error)).ok();
                                break;
                            }
                        }
                    }

                    // If there are no tool uses, the turn is done.
                    if tool_uses.is_empty() {
                        log::info!("No tool uses found, completing turn");
                        break;
                    }
                    log::info!("Found {} tool uses to execute", tool_uses.len());

                    // As tool results trickle in, insert them in the last user
                    // message so that they can be sent on the next tick of the
                    // agentic loop.
                    while let Some(tool_result) = tool_uses.next().await {
                        log::info!("Tool finished {:?}", tool_result);

                        events_tx
                            .unbounded_send(Ok(AgentResponseEvent::ToolCallUpdate(
                                to_acp_tool_call_update(&tool_result),
                            )))
                            .ok();
                        thread
                            .update(cx, |thread, _cx| {
                                thread.pending_tool_uses.remove(&tool_result.tool_use_id);
                                thread
                                    .last_user_message()
                                    .content
                                    .push(MessageContent::ToolResult(tool_result));
                            })
                            .ok();
                    }

                    completion_intent = CompletionIntent::ToolResults;
                }

                Ok(())
            }
            .await;

            if let Err(error) = turn_result {
                log::error!("Turn execution failed: {:?}", error);
                events_tx.unbounded_send(Err(error)).ok();
            } else {
                log::info!("Turn execution completed successfully");
            }
        }));
        events_rx
    }

    pub fn build_system_message(&self, cx: &App) -> Option<AgentMessage> {
        log::debug!("Building system message");
        let mut system_message = AgentMessage {
            role: Role::System,
            content: Vec::new(),
        };

        for prompt in &self.system_prompts {
            if let Some(rendered_prompt) = prompt.render(&self.templates, cx).log_err() {
                system_message
                    .content
                    .push(MessageContent::Text(rendered_prompt));
            }
        }

        let result = (!system_message.content.is_empty()).then_some(system_message);
        log::debug!("System message built: {}", result.is_some());
        result
    }

    /// A helper method that's called on every streamed completion event.
    /// Returns an optional tool result task, which the main agentic loop in
    /// send will send back to the model when it resolves.
    fn handle_streamed_completion_event(
        &mut self,
        event: LanguageModelCompletionEvent,
        events_tx: &mpsc::UnboundedSender<Result<AgentResponseEvent, LanguageModelCompletionError>>,
        cx: &mut Context<Self>,
    ) -> Option<Task<LanguageModelToolResult>> {
        log::trace!("Handling streamed completion event: {:?}", event);
        use LanguageModelCompletionEvent::*;

        match event {
            StartMessage { .. } => {
                self.messages.push(AgentMessage {
                    role: Role::Assistant,
                    content: Vec::new(),
                });
            }
            Text(new_text) => self.handle_text_event(new_text, events_tx, cx),
            Thinking { text, signature } => {
                self.handle_thinking_event(text, signature, events_tx, cx)
            }
            RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx),
            ToolUse(tool_use) => {
                return self.handle_tool_use_event(tool_use, events_tx, cx);
            }
            ToolUseJsonParseError {
                id,
                tool_name,
                raw_input,
                json_parse_error,
            } => {
                return Some(Task::ready(self.handle_tool_use_json_parse_error_event(
                    id,
                    tool_name,
                    raw_input,
                    json_parse_error,
                )));
            }
            UsageUpdate(_) | StatusUpdate(_) => {}
            Stop(_) => unreachable!(),
        }

        None
    }

    fn handle_text_event(
        &mut self,
        new_text: String,
        events_tx: &mpsc::UnboundedSender<Result<AgentResponseEvent, LanguageModelCompletionError>>,
        cx: &mut Context<Self>,
    ) {
        events_tx
            .unbounded_send(Ok(AgentResponseEvent::Text(new_text.clone())))
            .ok();

        let last_message = self.last_assistant_message();
        if let Some(MessageContent::Text(text)) = last_message.content.last_mut() {
            text.push_str(&new_text);
        } else {
            last_message.content.push(MessageContent::Text(new_text));
        }

        cx.notify();
    }

    fn handle_thinking_event(
        &mut self,
        new_text: String,
        new_signature: Option<String>,
        events_tx: &mpsc::UnboundedSender<Result<AgentResponseEvent, LanguageModelCompletionError>>,
        cx: &mut Context<Self>,
    ) {
        events_tx
            .unbounded_send(Ok(AgentResponseEvent::Thinking(new_text.clone())))
            .ok();

        let last_message = self.last_assistant_message();
        if let Some(MessageContent::Thinking { text, signature }) = last_message.content.last_mut()
        {
            text.push_str(&new_text);
            *signature = new_signature.or(signature.take());
        } else {
            last_message.content.push(MessageContent::Thinking {
                text: new_text,
                signature: new_signature,
            });
        }

        cx.notify();
    }

    fn handle_redacted_thinking_event(&mut self, data: String, cx: &mut Context<Self>) {
        let last_message = self.last_assistant_message();
        last_message
            .content
            .push(MessageContent::RedactedThinking(data));
        cx.notify();
    }

    fn handle_tool_use_event(
        &mut self,
        tool_use: LanguageModelToolUse,
        events_tx: &mpsc::UnboundedSender<Result<AgentResponseEvent, LanguageModelCompletionError>>,
        cx: &mut Context<Self>,
    ) -> Option<Task<LanguageModelToolResult>> {
        cx.notify();

        self.pending_tool_uses
            .insert(tool_use.id.clone(), tool_use.clone());
        let last_message = self.last_assistant_message();

        // Ensure the last message ends in the current tool use
        let push_new_tool_use = last_message.content.last_mut().map_or(true, |content| {
            if let MessageContent::ToolUse(last_tool_use) = content {
                if last_tool_use.id == tool_use.id {
                    *last_tool_use = tool_use.clone();
                    false
                } else {
                    true
                }
            } else {
                true
            }
        });
        if push_new_tool_use {
            events_tx
                .unbounded_send(Ok(AgentResponseEvent::ToolCall(acp::ToolCall {
                    id: acp::ToolCallId(tool_use.id.to_string().into()),
                    title: tool_use.name.to_string(),
                    kind: acp::ToolKind::Other,
                    status: acp::ToolCallStatus::Pending,
                    content: vec![],
                    locations: vec![],
                    raw_input: Some(tool_use.input.clone()),
                })))
                .ok();
            last_message
                .content
                .push(MessageContent::ToolUse(tool_use.clone()));
        } else {
            events_tx
                .unbounded_send(Ok(AgentResponseEvent::ToolCallUpdate(
                    acp::ToolCallUpdate {
                        id: acp::ToolCallId(tool_use.id.to_string().into()),
                        fields: acp::ToolCallUpdateFields {
                            raw_input: Some(tool_use.input.clone()),
                            ..Default::default()
                        },
                    },
                )))
                .ok();
        }

        if !tool_use.is_input_complete {
            return None;
        }

        if let Some(tool) = self.tools.get(tool_use.name.as_ref()) {
            events_tx
                .unbounded_send(Ok(AgentResponseEvent::ToolCallUpdate(
                    acp::ToolCallUpdate {
                        id: acp::ToolCallId(tool_use.id.to_string().into()),
                        fields: acp::ToolCallUpdateFields {
                            status: Some(acp::ToolCallStatus::InProgress),
                            ..Default::default()
                        },
                    },
                )))
                .ok();

            let pending_tool_result = tool.clone().run(tool_use.input, cx);

            Some(cx.foreground_executor().spawn(async move {
                match pending_tool_result.await {
                    Ok(tool_output) => LanguageModelToolResult {
                        tool_use_id: tool_use.id,
                        tool_name: tool_use.name,
                        is_error: false,
                        content: LanguageModelToolResultContent::Text(Arc::from(tool_output)),
                        output: None,
                    },
                    Err(error) => LanguageModelToolResult {
                        tool_use_id: tool_use.id,
                        tool_name: tool_use.name,
                        is_error: true,
                        content: LanguageModelToolResultContent::Text(Arc::from(error.to_string())),
                        output: None,
                    },
                }
            }))
        } else {
            let content = format!("No tool named {} exists", tool_use.name);
            Some(Task::ready(LanguageModelToolResult {
                content: LanguageModelToolResultContent::Text(Arc::from(content)),
                tool_use_id: tool_use.id,
                tool_name: tool_use.name,
                is_error: true,
                output: None,
            }))
        }
    }

    fn handle_tool_use_json_parse_error_event(
        &mut self,
        tool_use_id: LanguageModelToolUseId,
        tool_name: Arc<str>,
        raw_input: Arc<str>,
        json_parse_error: String,
    ) -> LanguageModelToolResult {
        let tool_output = format!("Error parsing input JSON: {json_parse_error}");
        LanguageModelToolResult {
            tool_use_id,
            tool_name,
            is_error: true,
            content: LanguageModelToolResultContent::Text(tool_output.into()),
            output: Some(serde_json::Value::String(raw_input.to_string())),
        }
    }

    /// Guarantees the last message is from the assistant and returns a mutable reference.
    fn last_assistant_message(&mut self) -> &mut AgentMessage {
        if self
            .messages
            .last()
            .map_or(true, |m| m.role != Role::Assistant)
        {
            self.messages.push(AgentMessage {
                role: Role::Assistant,
                content: Vec::new(),
            });
        }
        self.messages.last_mut().unwrap()
    }

    /// Guarantees the last message is from the user and returns a mutable reference.
    fn last_user_message(&mut self) -> &mut AgentMessage {
        if self.messages.last().map_or(true, |m| m.role != Role::User) {
            self.messages.push(AgentMessage {
                role: Role::User,
                content: Vec::new(),
            });
        }
        self.messages.last_mut().unwrap()
    }

    fn build_completion_request(
        &self,
        completion_intent: CompletionIntent,
        cx: &mut App,
    ) -> LanguageModelRequest {
        log::debug!("Building completion request");
        log::debug!("Completion intent: {:?}", completion_intent);
        log::debug!("Completion mode: {:?}", self.completion_mode);

        let messages = self.build_request_messages(cx);
        log::info!("Request will include {} messages", messages.len());

        let tools: Vec<LanguageModelRequestTool> = self
            .tools
            .values()
            .filter_map(|tool| {
                let tool_name = tool.name().to_string();
                log::trace!("Including tool: {}", tool_name);
                Some(LanguageModelRequestTool {
                    name: tool_name,
                    description: tool.description(cx).to_string(),
                    input_schema: tool
                        .input_schema(LanguageModelToolSchemaFormat::JsonSchema)
                        .log_err()?,
                })
            })
            .collect();

        log::info!("Request includes {} tools", tools.len());

        let request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: Some(completion_intent),
            mode: Some(self.completion_mode),
            messages,
            tools,
            tool_choice: None,
            stop: Vec::new(),
            temperature: None,
            thinking_allowed: true,
        };

        log::debug!("Completion request built successfully");
        request
    }

    fn build_request_messages(&self, cx: &App) -> Vec<LanguageModelRequestMessage> {
        log::trace!(
            "Building request messages from {} thread messages",
            self.messages.len()
        );

        let messages = self
            .build_system_message(cx)
            .iter()
            .chain(self.messages.iter())
            .map(|message| {
                log::trace!(
                    " - {} message with {} content items",
                    match message.role {
                        Role::System => "System",
                        Role::User => "User",
                        Role::Assistant => "Assistant",
                    },
                    message.content.len()
                );
                LanguageModelRequestMessage {
                    role: message.role,
                    content: message.content.clone(),
                    cache: false,
                }
            })
            .collect();
        messages
    }

    pub fn to_markdown(&self) -> String {
        let mut markdown = String::new();
        for message in &self.messages {
            markdown.push_str(&message.to_markdown());
        }
        markdown
    }
}

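/// A statically-typed tool that the model can invoke during a turn.
///
/// Implementors describe their input with a [`JsonSchema`] type; [`Thread::add_tool`]
/// erases the type so the thread can dispatch on the tool name at runtime. A minimal
/// sketch of an implementor (the `EchoTool` and `EchoInput` names here are
/// hypothetical, not part of this crate):
///
/// ```ignore
/// /// Echoes the provided text back to the model.
/// #[derive(Deserialize, JsonSchema)]
/// struct EchoInput {
///     /// The text to echo back.
///     text: String,
/// }
///
/// struct EchoTool;
///
/// impl AgentTool for EchoTool {
///     type Input = EchoInput;
///
///     fn name(&self) -> SharedString {
///         "echo".into()
///     }
///
///     fn run(self: Arc<Self>, input: Self::Input, _cx: &mut App) -> Task<Result<String>> {
///         // The result text is what gets sent back to the model as the tool result.
///         Task::ready(Ok(input.text))
///     }
/// }
///
/// // Registered on a thread with: thread.add_tool(EchoTool);
/// ```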
pub trait AgentTool
where
    Self: 'static + Sized,
{
    type Input: for<'de> Deserialize<'de> + JsonSchema;

    fn name(&self) -> SharedString;
    fn description(&self, _cx: &mut App) -> SharedString {
        let schema = schemars::schema_for!(Self::Input);
        SharedString::new(
            schema
                .get("description")
                .and_then(|description| description.as_str())
                .unwrap_or_default(),
        )
    }

    /// Returns the JSON schema that describes the tool's input.
    fn input_schema(&self, _format: LanguageModelToolSchemaFormat) -> Schema {
        schemars::schema_for!(Self::Input)
    }

    /// Runs the tool with the provided input.
    fn run(self: Arc<Self>, input: Self::Input, cx: &mut App) -> Task<Result<String>>;

    fn erase(self) -> Arc<dyn AnyAgentTool> {
        Arc::new(Erased(Arc::new(self)))
    }
}

pub struct Erased<T>(T);

pub trait AnyAgentTool {
    fn name(&self) -> SharedString;
    fn description(&self, cx: &mut App) -> SharedString;
    fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value>;
    fn run(self: Arc<Self>, input: serde_json::Value, cx: &mut App) -> Task<Result<String>>;
}

impl<T> AnyAgentTool for Erased<Arc<T>>
where
    T: AgentTool,
{
    fn name(&self) -> SharedString {
        self.0.name()
    }

    fn description(&self, cx: &mut App) -> SharedString {
        self.0.description(cx)
    }

    fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Result<serde_json::Value> {
        Ok(serde_json::to_value(self.0.input_schema(format))?)
    }

    fn run(self: Arc<Self>, input: serde_json::Value, cx: &mut App) -> Task<Result<String>> {
        let parsed_input: Result<T::Input> = serde_json::from_value(input).map_err(Into::into);
        match parsed_input {
            Ok(input) => self.0.clone().run(input, cx),
            Err(error) => Task::ready(Err(anyhow!(error))),
        }
    }
}

fn to_acp_stop_reason(reason: StopReason) -> Option<acp::StopReason> {
    match reason {
        StopReason::EndTurn => Some(acp::StopReason::EndTurn),
        StopReason::MaxTokens => Some(acp::StopReason::MaxTokens),
        StopReason::Refusal => Some(acp::StopReason::Refusal),
        StopReason::ToolUse => None,
    }
}

fn to_acp_tool_call_update(tool_result: &LanguageModelToolResult) -> acp::ToolCallUpdate {
    let status = if tool_result.is_error {
        acp::ToolCallStatus::Failed
    } else {
        acp::ToolCallStatus::Completed
    };
    let content = match &tool_result.content {
        LanguageModelToolResultContent::Text(text) => text.to_string().into(),
        LanguageModelToolResultContent::Image(LanguageModelImage { source, .. }) => {
            acp::ToolCallContent::Content {
                content: acp::ContentBlock::Image(acp::ImageContent {
                    annotations: None,
                    data: source.to_string(),
                    mime_type: ImageFormat::Png.mime_type().to_string(),
                }),
            }
        }
    };
    acp::ToolCallUpdate {
        id: acp::ToolCallId(tool_result.tool_use_id.to_string().into()),
        fields: acp::ToolCallUpdateFields {
            status: Some(status),
            content: Some(vec![content]),
            ..Default::default()
        },
    }
}