Fix agent2 compilation errors and warnings

Created by Nathan Sobo

- Add cloud_llm_client dependency for CompletionIntent and CompletionMode
- Fix LanguageModelRequest initialization by adding the missing thinking_allowed field
- Update StartMessage handling to use Assistant role
- Fix MessageContent conversions to use enum variants directly (see the sketch after this list)
- Fix input_schema implementation to use schemars directly
- Suppress unused variable and dead code warnings
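
To make the conversion fix concrete, here is a minimal, self-contained sketch of the pattern the commit adopts: passing a tuple enum variant directly as a function. The types are simplified stand-ins, not the real language_model definitions.

    // Hypothetical stand-ins for the real language_model types.
    struct ToolResult {
        output: String,
    }

    enum MessageContent {
        ToolResult(ToolResult),
    }

    fn main() {
        let tool_results = vec![ToolResult { output: "done".into() }];

        // Same shape as the thread.rs change: pass the variant constructor to
        // `map` instead of relying on `.map(Into::into)`.
        let content: Vec<MessageContent> = tool_results
            .into_iter()
            .map(MessageContent::ToolResult)
            .collect();

        for item in &content {
            match item {
                MessageContent::ToolResult(result) => println!("tool result: {}", result.output),
            }
        }
    }

The same explicit-variant style is used for MessageContent::ToolUse in handle_tool_use_event.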

Change summary

Cargo.lock                   |  1 +
crates/agent2/Cargo.toml     |  1 +
crates/agent2/src/prompts.rs |  5 +++--
crates/agent2/src/thread.rs  | 42 ++++++++++++++++++++++++++----------------
4 files changed, 31 insertions(+), 18 deletions(-)

Detailed changes

Cargo.lock

@@ -157,6 +157,7 @@ dependencies = [
  "assistant_tools",
  "chrono",
  "client",
+ "cloud_llm_client",
  "collections",
  "ctor",
  "env_logger 0.11.8",

crates/agent2/Cargo.toml

@@ -16,6 +16,7 @@ anyhow.workspace = true
 assistant_tool.workspace = true
 assistant_tools.workspace = true
 chrono.workspace = true
+cloud_llm_client.workspace = true
 collections.workspace = true
 fs.workspace = true
 futures.workspace = true

crates/agent2/src/prompts.rs

@@ -6,11 +6,12 @@ use anyhow::Result;
 use gpui::{App, Entity};
 use project::Project;
 
-struct BasePrompt {
+#[allow(dead_code)]
+struct _BasePrompt {
     project: Entity<Project>,
 }
 
-impl Prompt for BasePrompt {
+impl Prompt for _BasePrompt {
     fn render(&self, templates: &Templates, cx: &App) -> Result<String> {
         BaseTemplate {
             os: std::env::consts::OS.to_string(),
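
For context on the warning silenced here: either the #[allow(dead_code)] attribute or an underscore-prefixed item name is normally enough on its own to quiet the lint; the commit applies both. A standalone sketch with a hypothetical type name:

    // Hypothetical example: a struct that nothing constructs yet.
    // Without the attribute (or an underscore-prefixed name), rustc
    // reports a dead_code warning for it.
    #[allow(dead_code)]
    struct BasePromptPlaceholder {
        description: String,
    }

    fn main() {
        println!("compiles without a dead_code warning");
    }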

crates/agent2/src/thread.rs

@@ -1,12 +1,13 @@
 use crate::templates::Templates;
 use anyhow::{anyhow, Result};
+use cloud_llm_client::{CompletionIntent, CompletionMode};
 use futures::{channel::mpsc, future};
 use gpui::{App, Context, SharedString, Task};
 use language_model::{
-    CompletionIntent, CompletionMode, LanguageModel, LanguageModelCompletionError,
-    LanguageModelCompletionEvent, LanguageModelRequest, LanguageModelRequestMessage,
-    LanguageModelRequestTool, LanguageModelToolResult, LanguageModelToolResultContent,
-    LanguageModelToolSchemaFormat, LanguageModelToolUse, MessageContent, Role, StopReason,
+    LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
+    LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
+    LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
+    LanguageModelToolUse, MessageContent, Role, StopReason,
 };
 use schemars::{JsonSchema, Schema};
 use serde::Deserialize;
@@ -138,7 +139,10 @@ impl Thread {
                         .update(cx, |thread, _cx| {
                             thread.messages.push(AgentMessage {
                                 role: Role::User,
-                                content: tool_results.into_iter().map(Into::into).collect(),
+                                content: tool_results
+                                    .into_iter()
+                                    .map(MessageContent::ToolResult)
+                                    .collect(),
                             });
                         })
                         .ok();
@@ -187,27 +191,30 @@ impl Thread {
 
         match event {
             Text(new_text) => self.handle_text_event(new_text, cx),
-            Thinking { text, signature } => {
+            Thinking {
+                text: _text,
+                signature: _signature,
+            } => {
                 todo!()
             }
             ToolUse(tool_use) => {
                 return self.handle_tool_use_event(tool_use, cx);
             }
-            StartMessage { role, .. } => {
+            StartMessage { .. } => {
                 self.messages.push(AgentMessage {
-                    role,
+                    role: Role::Assistant,
                     content: Vec::new(),
                 });
             }
             UsageUpdate(_) => {}
             Stop(stop_reason) => self.handle_stop_event(stop_reason),
             StatusUpdate(_completion_request_status) => {}
-            RedactedThinking { data } => todo!(),
+            RedactedThinking { data: _data } => todo!(),
             ToolUseJsonParseError {
-                id,
-                tool_name,
-                raw_input,
-                json_parse_error,
+                id: _id,
+                tool_name: _tool_name,
+                raw_input: _raw_input,
+                json_parse_error: _json_parse_error,
             } => todo!(),
         }
 
@@ -256,7 +263,9 @@ impl Thread {
             }
         });
         if push_new_tool_use {
-            last_message.content.push(tool_use.clone().into());
+            last_message
+                .content
+                .push(MessageContent::ToolUse(tool_use.clone()));
         }
 
         if !tool_use.is_input_complete {
@@ -340,6 +349,7 @@ impl Thread {
             tool_choice: None,
             stop: Vec::new(),
             temperature: None,
+            thinking_allowed: false,
         }
     }
 
@@ -373,8 +383,8 @@ where
     }
 
     /// Returns the JSON schema that describes the tool's input.
-    fn input_schema(&self, format: LanguageModelToolSchemaFormat) -> Schema {
-        assistant_tools::root_schema_for::<Self::Input>(format)
+    fn input_schema(&self, _format: LanguageModelToolSchemaFormat) -> Schema {
+        schemars::schema_for!(Self::Input)
     }
 
     /// Runs the tool with the provided input.
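
The input_schema change replaces the assistant_tools::root_schema_for helper with schemars' derive-based macro. Below is a minimal sketch of that pattern, assuming schemars and serde_json are available and using a hypothetical input type; note the renamed _format parameter is now ignored, so the schema is emitted in schemars' default format.

    use schemars::{schema_for, JsonSchema};

    // Hypothetical tool input; real tools supply their own `Self::Input`.
    // The field is only described by the generated schema, never read here,
    // so dead_code is allowed, echoing the suppression elsewhere in this commit.
    #[allow(dead_code)]
    #[derive(JsonSchema)]
    struct EchoToolInput {
        /// Text the tool should echo back.
        message: String,
    }

    fn main() {
        // Mirrors `schemars::schema_for!(Self::Input)` in the new default
        // implementation of `input_schema`.
        let schema = schema_for!(EchoToolInput);
        println!("{}", serde_json::to_string_pretty(&schema).unwrap());
    }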