cloud_llm_client: Move `CompletionIntent` to `language_model` (#52359)

Created by Marshall Bowers

This PR moves the `CompletionIntent` enum from the `cloud_llm_client`
crate to the `language_model` crate, as it is no longer part of the
Cloud interface.

Release Notes:

- N/A

Change summary

Cargo.lock                                          |  3 --
crates/agent/src/edit_agent.rs                      |  5 +--
crates/agent/src/tests/mod.rs                       |  5 +--
crates/agent/src/thread.rs                          | 13 +++++------
crates/agent/src/tools/edit_file_tool.rs            |  3 -
crates/agent_ui/Cargo.toml                          |  1 
crates/agent_ui/src/buffer_codegen.rs               |  6 +---
crates/agent_ui/src/terminal_inline_assistant.rs    |  5 +--
crates/assistant_text_thread/Cargo.toml             |  1 
crates/assistant_text_thread/src/text_thread.rs     |  7 ++---
crates/cloud_llm_client/src/cloud_llm_client.rs     | 17 ---------------
crates/git_ui/Cargo.toml                            |  1 
crates/git_ui/src/git_panel.rs                      |  4 +-
crates/language_model/src/request.rs                | 16 +++++++++++++
crates/language_models/src/provider/cloud.rs        |  5 ----
crates/language_models/src/provider/copilot_chat.rs |  3 -
16 files changed, 36 insertions(+), 59 deletions(-)

Detailed changes

Cargo.lock 🔗

@@ -343,7 +343,6 @@ dependencies = [
  "chrono",
  "client",
  "cloud_api_types",
- "cloud_llm_client",
  "collections",
  "command_palette_hooks",
  "component",
@@ -864,7 +863,6 @@ dependencies = [
  "chrono",
  "client",
  "clock",
- "cloud_llm_client",
  "collections",
  "context_server",
  "fs",
@@ -7367,7 +7365,6 @@ dependencies = [
  "askpass",
  "buffer_diff",
  "call",
- "cloud_llm_client",
  "collections",
  "component",
  "ctor",

crates/agent/src/edit_agent.rs 🔗

@@ -8,7 +8,6 @@ pub mod streaming_fuzzy_matcher;
 use crate::{Template, Templates};
 use action_log::ActionLog;
 use anyhow::Result;
-use cloud_llm_client::CompletionIntent;
 use create_file_parser::{CreateFileParser, CreateFileParserEvent};
 pub use edit_parser::EditFormat;
 use edit_parser::{EditParser, EditParserEvent, EditParserMetrics};
@@ -21,8 +20,8 @@ use futures::{
 use gpui::{AppContext, AsyncApp, Entity, Task};
 use language::{Anchor, Buffer, BufferSnapshot, LineIndent, Point, TextBufferSnapshot};
 use language_model::{
-    LanguageModel, LanguageModelCompletionError, LanguageModelRequest, LanguageModelRequestMessage,
-    LanguageModelToolChoice, MessageContent, Role,
+    CompletionIntent, LanguageModel, LanguageModelCompletionError, LanguageModelRequest,
+    LanguageModelRequestMessage, LanguageModelToolChoice, MessageContent, Role,
 };
 use project::{AgentLocation, Project};
 use reindent::{IndentDelta, Reindenter};

crates/agent/src/tests/mod.rs 🔗

@@ -7,7 +7,6 @@ use agent_client_protocol::{self as acp};
 use agent_settings::AgentProfileId;
 use anyhow::Result;
 use client::{Client, UserStore};
-use cloud_llm_client::CompletionIntent;
 use collections::IndexMap;
 use context_server::{ContextServer, ContextServerCommand, ContextServerId};
 use feature_flags::FeatureFlagAppExt as _;
@@ -26,8 +25,8 @@ use gpui::{
 };
 use indoc::indoc;
 use language_model::{
-    LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId,
-    LanguageModelProviderName, LanguageModelRegistry, LanguageModelRequest,
+    CompletionIntent, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
+    LanguageModelId, LanguageModelProviderName, LanguageModelRegistry, LanguageModelRequest,
     LanguageModelRequestMessage, LanguageModelToolResult, LanguageModelToolSchemaFormat,
     LanguageModelToolUse, MessageContent, Role, StopReason, TokenUsage,
     fake_provider::FakeLanguageModel,

crates/agent/src/thread.rs 🔗

@@ -20,7 +20,6 @@ use anyhow::{Context as _, Result, anyhow};
 use chrono::{DateTime, Utc};
 use client::UserStore;
 use cloud_api_types::Plan;
-use cloud_llm_client::CompletionIntent;
 use collections::{HashMap, HashSet, IndexMap};
 use fs::Fs;
 use futures::stream;
@@ -35,12 +34,12 @@ use gpui::{
 };
 use heck::ToSnakeCase as _;
 use language_model::{
-    LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId,
-    LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry, LanguageModelRequest,
-    LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
-    LanguageModelToolResultContent, LanguageModelToolSchemaFormat, LanguageModelToolUse,
-    LanguageModelToolUseId, Role, SelectedModel, Speed, StopReason, TokenUsage,
-    ZED_CLOUD_PROVIDER_ID,
+    CompletionIntent, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
+    LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
+    LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
+    LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
+    LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, Speed, StopReason,
+    TokenUsage, ZED_CLOUD_PROVIDER_ID,
 };
 use project::Project;
 use prompt_store::ProjectContext;

crates/agent/src/tools/edit_file_tool.rs 🔗

@@ -8,14 +8,13 @@ use crate::{
 use acp_thread::Diff;
 use agent_client_protocol::{self as acp, ToolCallLocation, ToolCallUpdateFields};
 use anyhow::{Context as _, Result};
-use cloud_llm_client::CompletionIntent;
 use collections::HashSet;
 use futures::{FutureExt as _, StreamExt as _};
 use gpui::{App, AppContext, AsyncApp, Entity, Task, WeakEntity};
 use indoc::formatdoc;
 use language::language_settings::{self, FormatOnSave};
 use language::{LanguageRegistry, ToPoint};
-use language_model::LanguageModelToolResultContent;
+use language_model::{CompletionIntent, LanguageModelToolResultContent};
 use project::lsp_store::{FormatTrigger, LspFormatTarget};
 use project::{Project, ProjectPath};
 use schemars::JsonSchema;

crates/agent_ui/Cargo.toml 🔗

@@ -44,7 +44,6 @@ buffer_diff.workspace = true
 chrono.workspace = true
 client.workspace = true
 cloud_api_types.workspace = true
-cloud_llm_client.workspace = true
 collections.workspace = true
 command_palette_hooks.workspace = true
 component.workspace = true

crates/agent_ui/src/buffer_codegen.rs 🔗

@@ -1,9 +1,6 @@
 use crate::{context::LoadedContext, inline_prompt_editor::CodegenStatus};
 use agent_settings::AgentSettings;
 use anyhow::{Context as _, Result};
-use uuid::Uuid;
-
-use cloud_llm_client::CompletionIntent;
 use collections::HashSet;
 use editor::{Anchor, AnchorRangeExt, MultiBuffer, MultiBufferSnapshot, ToOffset as _, ToPoint};
 use futures::{
@@ -16,7 +13,7 @@ use futures::{
 use gpui::{App, AppContext as _, AsyncApp, Context, Entity, EventEmitter, Subscription, Task};
 use language::{Buffer, IndentKind, LanguageName, Point, TransactionId, line_diff};
 use language_model::{
-    LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
+    CompletionIntent, LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
     LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
     LanguageModelRequestTool, LanguageModelTextStream, LanguageModelToolChoice,
     LanguageModelToolUse, Role, TokenUsage,
@@ -40,6 +37,7 @@ use std::{
     time::Instant,
 };
 use streaming_diff::{CharOperation, LineDiff, LineOperation, StreamingDiff};
+use uuid::Uuid;
 
 /// Use this tool when you cannot or should not make a rewrite. This includes:
 /// - The user's request is unclear, ambiguous, or nonsensical

crates/agent_ui/src/terminal_inline_assistant.rs 🔗

@@ -10,15 +10,14 @@ use agent::ThreadStore;
 use agent_settings::AgentSettings;
 use anyhow::{Context as _, Result};
 
-use cloud_llm_client::CompletionIntent;
 use collections::{HashMap, VecDeque};
 use editor::{MultiBuffer, actions::SelectAll};
 use fs::Fs;
 use gpui::{App, Entity, Focusable, Global, Subscription, Task, UpdateGlobal, WeakEntity};
 use language::Buffer;
 use language_model::{
-    ConfiguredModel, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
-    Role, report_anthropic_event,
+    CompletionIntent, ConfiguredModel, LanguageModelRegistry, LanguageModelRequest,
+    LanguageModelRequestMessage, Role, report_anthropic_event,
 };
 use project::Project;
 use prompt_store::{PromptBuilder, PromptStore};

crates/assistant_text_thread/Cargo.toml 🔗

@@ -21,7 +21,6 @@ assistant_slash_command.workspace = true
 chrono.workspace = true
 client.workspace = true
 clock.workspace = true
-cloud_llm_client.workspace = true
 collections.workspace = true
 context_server.workspace = true
 fs.workspace = true

crates/assistant_text_thread/src/text_thread.rs 🔗

@@ -6,7 +6,6 @@ use assistant_slash_command::{
 };
 use client::{self, proto};
 use clock::ReplicaId;
-use cloud_llm_client::CompletionIntent;
 use collections::{HashMap, HashSet};
 use fs::{Fs, RenameOptions};
 
@@ -18,9 +17,9 @@ use gpui::{
 use itertools::Itertools as _;
 use language::{AnchorRangeExt, Bias, Buffer, LanguageRegistry, OffsetRangeExt, Point, ToOffset};
 use language_model::{
-    AnthropicCompletionType, AnthropicEventData, AnthropicEventType, LanguageModel,
-    LanguageModelCacheConfiguration, LanguageModelCompletionEvent, LanguageModelImage,
-    LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
+    AnthropicCompletionType, AnthropicEventData, AnthropicEventType, CompletionIntent,
+    LanguageModel, LanguageModelCacheConfiguration, LanguageModelCompletionEvent,
+    LanguageModelImage, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
     LanguageModelToolUseId, MessageContent, PaymentRequiredError, Role, StopReason,
     report_anthropic_event,
 };

crates/cloud_llm_client/src/cloud_llm_client.rs 🔗

@@ -193,29 +193,12 @@ pub enum EditPredictionRejectReason {
     Rejected,
 }
 
-#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
-#[serde(rename_all = "snake_case")]
-pub enum CompletionIntent {
-    UserPrompt,
-    Subagent,
-    ToolResults,
-    ThreadSummarization,
-    ThreadContextSummarization,
-    CreateFile,
-    EditFile,
-    InlineAssist,
-    TerminalInlineAssist,
-    GenerateGitCommitMessage,
-}
-
 #[derive(Debug, Serialize, Deserialize)]
 pub struct CompletionBody {
     #[serde(skip_serializing_if = "Option::is_none", default)]
     pub thread_id: Option<String>,
     #[serde(skip_serializing_if = "Option::is_none", default)]
     pub prompt_id: Option<String>,
-    #[serde(skip_serializing_if = "Option::is_none", default)]
-    pub intent: Option<CompletionIntent>,
     pub provider: LanguageModelProvider,
     pub model: String,
     pub provider_request: serde_json::Value,

crates/git_ui/Cargo.toml 🔗

@@ -21,7 +21,6 @@ anyhow.workspace = true
 askpass.workspace = true
 buffer_diff.workspace = true
 call.workspace = true
-cloud_llm_client.workspace = true
 collections.workspace = true
 component.workspace = true
 db.workspace = true

crates/git_ui/src/git_panel.rs 🔗

@@ -12,7 +12,6 @@ use crate::{
 use agent_settings::AgentSettings;
 use anyhow::Context as _;
 use askpass::AskPassDelegate;
-use cloud_llm_client::CompletionIntent;
 use collections::{BTreeMap, HashMap, HashSet};
 use db::kvp::KeyValueStore;
 use editor::{
@@ -45,7 +44,8 @@ use gpui::{
 use itertools::Itertools;
 use language::{Buffer, File};
 use language_model::{
-    ConfiguredModel, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage, Role,
+    CompletionIntent, ConfiguredModel, LanguageModelRegistry, LanguageModelRequest,
+    LanguageModelRequestMessage, Role,
 };
 use menu;
 use multi_buffer::ExcerptInfo;

crates/language_model/src/request.rs 🔗

@@ -3,7 +3,6 @@ use std::sync::Arc;
 
 use anyhow::Result;
 use base64::write::EncoderWriter;
-use cloud_llm_client::CompletionIntent;
 use gpui::{
     App, AppContext as _, DevicePixels, Image, ImageFormat, ObjectFit, SharedString, Size, Task,
     point, px, size,
@@ -443,6 +442,21 @@ pub enum LanguageModelToolChoice {
     None,
 }
 
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum CompletionIntent {
+    UserPrompt,
+    Subagent,
+    ToolResults,
+    ThreadSummarization,
+    ThreadContextSummarization,
+    CreateFile,
+    EditFile,
+    InlineAssist,
+    TerminalInlineAssist,
+    GenerateGitCommitMessage,
+}
+
 #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
 pub struct LanguageModelRequest {
     pub thread_id: Option<String>,

crates/language_models/src/provider/cloud.rs 🔗

@@ -769,7 +769,6 @@ impl LanguageModel for CloudLanguageModel {
     > {
         let thread_id = request.thread_id.clone();
         let prompt_id = request.prompt_id.clone();
-        let intent = request.intent;
         let app_version = Some(cx.update(|cx| AppVersion::global(cx)));
         let user_store = self.user_store.clone();
         let organization_id = cx.update(|cx| {
@@ -822,7 +821,6 @@ impl LanguageModel for CloudLanguageModel {
                         CompletionBody {
                             thread_id,
                             prompt_id,
-                            intent,
                             provider: cloud_llm_client::LanguageModelProvider::Anthropic,
                             model: request.model.clone(),
                             provider_request: serde_json::to_value(&request)
@@ -881,7 +879,6 @@ impl LanguageModel for CloudLanguageModel {
                         CompletionBody {
                             thread_id,
                             prompt_id,
-                            intent,
                             provider: cloud_llm_client::LanguageModelProvider::OpenAi,
                             model: request.model.clone(),
                             provider_request: serde_json::to_value(&request)
@@ -923,7 +920,6 @@ impl LanguageModel for CloudLanguageModel {
                         CompletionBody {
                             thread_id,
                             prompt_id,
-                            intent,
                             provider: cloud_llm_client::LanguageModelProvider::XAi,
                             model: request.model.clone(),
                             provider_request: serde_json::to_value(&request)
@@ -958,7 +954,6 @@ impl LanguageModel for CloudLanguageModel {
                         CompletionBody {
                             thread_id,
                             prompt_id,
-                            intent,
                             provider: cloud_llm_client::LanguageModelProvider::Google,
                             model: request.model.model_id.clone(),
                             provider_request: serde_json::to_value(&request)

crates/language_models/src/provider/copilot_chat.rs 🔗

@@ -4,7 +4,6 @@ use std::sync::Arc;
 
 use anthropic::AnthropicModelMode;
 use anyhow::{Result, anyhow};
-use cloud_llm_client::CompletionIntent;
 use collections::HashMap;
 use copilot::{GlobalCopilotAuth, Status};
 use copilot_chat::responses as copilot_responses;
@@ -21,7 +20,7 @@ use gpui::{AnyView, App, AsyncApp, Entity, Subscription, Task};
 use http_client::StatusCode;
 use language::language_settings::all_language_settings;
 use language_model::{
-    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCompletionError,
+    AuthenticateError, CompletionIntent, IconOrSvg, LanguageModel, LanguageModelCompletionError,
     LanguageModelCompletionEvent, LanguageModelCostInfo, LanguageModelEffortLevel, LanguageModelId,
     LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
     LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,