Finish inline telemetry changes (#44842)

Mikayla Maki created

Closes: N/A (no linked issue — template placeholder left unfilled)

Release Notes:

- N/A

Change summary

Cargo.lock                                                      |   4 
crates/agent_ui/Cargo.toml                                      |   1 
crates/agent_ui/src/agent_ui.rs                                 |  11 
crates/agent_ui/src/buffer_codegen.rs                           | 148 
crates/agent_ui/src/inline_assistant.rs                         | 125 
crates/agent_ui/src/inline_prompt_editor.rs                     | 276 +-
crates/agent_ui/src/terminal_codegen.rs                         |  66 
crates/agent_ui/src/terminal_inline_assistant.rs                |  83 
crates/agent_ui/src/text_thread_editor.rs                       |   1 
crates/assistant_text_thread/Cargo.toml                         |   2 
crates/assistant_text_thread/src/assistant_text_thread_tests.rs |   9 
crates/assistant_text_thread/src/text_thread.rs                 |  52 
crates/assistant_text_thread/src/text_thread_store.rs           |  14 
crates/language_model/Cargo.toml                                |   1 
crates/language_model/src/telemetry.rs                          | 124 
crates/settings_ui/src/settings_ui.rs                           |   7 
16 files changed, 499 insertions(+), 425 deletions(-)

Detailed changes

Cargo.lock 🔗

@@ -388,7 +388,6 @@ dependencies = [
  "streaming_diff",
  "task",
  "telemetry",
- "telemetry_events",
  "terminal",
  "terminal_view",
  "text",
@@ -894,7 +893,7 @@ dependencies = [
  "settings",
  "smallvec",
  "smol",
- "telemetry_events",
+ "telemetry",
  "text",
  "ui",
  "unindent",
@@ -8817,7 +8816,6 @@ dependencies = [
  "serde_json",
  "settings",
  "smol",
- "telemetry_events",
  "thiserror 2.0.17",
  "util",
  "zed_env_vars",

crates/agent_ui/Cargo.toml 🔗

@@ -84,7 +84,6 @@ smol.workspace = true
 streaming_diff.workspace = true
 task.workspace = true
 telemetry.workspace = true
-telemetry_events.workspace = true
 terminal.workspace = true
 terminal_view.workspace = true
 text.workspace = true

crates/agent_ui/src/agent_ui.rs 🔗

@@ -216,7 +216,7 @@ pub fn init(
     is_eval: bool,
     cx: &mut App,
 ) {
-    assistant_text_thread::init(client.clone(), cx);
+    assistant_text_thread::init(client, cx);
     rules_library::init(cx);
     if !is_eval {
         // Initializing the language model from the user settings messes with the eval, so we only initialize them when
@@ -229,13 +229,8 @@ pub fn init(
     TextThreadEditor::init(cx);
 
     register_slash_commands(cx);
-    inline_assistant::init(
-        fs.clone(),
-        prompt_builder.clone(),
-        client.telemetry().clone(),
-        cx,
-    );
-    terminal_inline_assistant::init(fs.clone(), prompt_builder, client.telemetry().clone(), cx);
+    inline_assistant::init(fs.clone(), prompt_builder.clone(), cx);
+    terminal_inline_assistant::init(fs.clone(), prompt_builder, cx);
     cx.observe_new(move |workspace, window, cx| {
         ConfigureContextServerModal::register(workspace, language_registry.clone(), window, cx)
     })

crates/agent_ui/src/buffer_codegen.rs 🔗

@@ -1,8 +1,8 @@
 use crate::{context::LoadedContext, inline_prompt_editor::CodegenStatus};
 use agent_settings::AgentSettings;
 use anyhow::{Context as _, Result};
+use uuid::Uuid;
 
-use client::telemetry::Telemetry;
 use cloud_llm_client::CompletionIntent;
 use collections::HashSet;
 use editor::{Anchor, AnchorRangeExt, MultiBuffer, MultiBufferSnapshot, ToOffset as _, ToPoint};
@@ -15,12 +15,12 @@ use futures::{
     stream::BoxStream,
 };
 use gpui::{App, AppContext as _, AsyncApp, Context, Entity, EventEmitter, Subscription, Task};
-use language::{Buffer, IndentKind, Point, TransactionId, line_diff};
+use language::{Buffer, IndentKind, LanguageName, Point, TransactionId, line_diff};
 use language_model::{
     LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent,
     LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
     LanguageModelRequestTool, LanguageModelTextStream, LanguageModelToolChoice,
-    LanguageModelToolUse, Role, TokenUsage, report_assistant_event,
+    LanguageModelToolUse, Role, TokenUsage,
 };
 use multi_buffer::MultiBufferRow;
 use parking_lot::Mutex;
@@ -41,7 +41,6 @@ use std::{
     time::Instant,
 };
 use streaming_diff::{CharOperation, LineDiff, LineOperation, StreamingDiff};
-use telemetry_events::{AssistantEventData, AssistantKind, AssistantPhase};
 use ui::SharedString;
 
 /// Use this tool to provide a message to the user when you're unable to complete a task.
@@ -77,9 +76,9 @@ pub struct BufferCodegen {
     buffer: Entity<MultiBuffer>,
     range: Range<Anchor>,
     initial_transaction_id: Option<TransactionId>,
-    telemetry: Arc<Telemetry>,
     builder: Arc<PromptBuilder>,
     pub is_insertion: bool,
+    session_id: Uuid,
 }
 
 impl BufferCodegen {
@@ -87,7 +86,7 @@ impl BufferCodegen {
         buffer: Entity<MultiBuffer>,
         range: Range<Anchor>,
         initial_transaction_id: Option<TransactionId>,
-        telemetry: Arc<Telemetry>,
+        session_id: Uuid,
         builder: Arc<PromptBuilder>,
         cx: &mut Context<Self>,
     ) -> Self {
@@ -96,8 +95,8 @@ impl BufferCodegen {
                 buffer.clone(),
                 range.clone(),
                 false,
-                Some(telemetry.clone()),
                 builder.clone(),
+                session_id,
                 cx,
             )
         });
@@ -110,8 +109,8 @@ impl BufferCodegen {
             buffer,
             range,
             initial_transaction_id,
-            telemetry,
             builder,
+            session_id,
         };
         this.activate(0, cx);
         this
@@ -134,6 +133,10 @@ impl BufferCodegen {
         &self.alternatives[self.active_alternative]
     }
 
+    pub fn language_name(&self, cx: &App) -> Option<LanguageName> {
+        self.active_alternative().read(cx).language_name(cx)
+    }
+
     pub fn status<'a>(&self, cx: &'a App) -> &'a CodegenStatus {
         &self.active_alternative().read(cx).status
     }
@@ -192,8 +195,8 @@ impl BufferCodegen {
                     self.buffer.clone(),
                     self.range.clone(),
                     false,
-                    Some(self.telemetry.clone()),
                     self.builder.clone(),
+                    self.session_id,
                     cx,
                 )
             }));
@@ -256,6 +259,10 @@ impl BufferCodegen {
     pub fn selected_text<'a>(&self, cx: &'a App) -> Option<&'a str> {
         self.active_alternative().read(cx).selected_text()
     }
+
+    pub fn session_id(&self) -> Uuid {
+        self.session_id
+    }
 }
 
 impl EventEmitter<CodegenEvent> for BufferCodegen {}
@@ -271,7 +278,6 @@ pub struct CodegenAlternative {
     status: CodegenStatus,
     generation: Task<()>,
     diff: Diff,
-    telemetry: Option<Arc<Telemetry>>,
     _subscription: gpui::Subscription,
     builder: Arc<PromptBuilder>,
     active: bool,
@@ -282,6 +288,7 @@ pub struct CodegenAlternative {
     selected_text: Option<String>,
     pub message_id: Option<String>,
     pub model_explanation: Option<SharedString>,
+    session_id: Uuid,
 }
 
 impl EventEmitter<CodegenEvent> for CodegenAlternative {}
@@ -291,8 +298,8 @@ impl CodegenAlternative {
         buffer: Entity<MultiBuffer>,
         range: Range<Anchor>,
         active: bool,
-        telemetry: Option<Arc<Telemetry>>,
         builder: Arc<PromptBuilder>,
+        session_id: Uuid,
         cx: &mut Context<Self>,
     ) -> Self {
         let snapshot = buffer.read(cx).snapshot(cx);
@@ -331,7 +338,6 @@ impl CodegenAlternative {
             status: CodegenStatus::Idle,
             generation: Task::ready(()),
             diff: Diff::default(),
-            telemetry,
             builder,
             active: active,
             edits: Vec::new(),
@@ -341,10 +347,18 @@ impl CodegenAlternative {
             completion: None,
             selected_text: None,
             model_explanation: None,
+            session_id,
             _subscription: cx.subscribe(&buffer, Self::handle_buffer_event),
         }
     }
 
+    pub fn language_name(&self, cx: &App) -> Option<LanguageName> {
+        self.old_buffer
+            .read(cx)
+            .language()
+            .map(|language| language.name())
+    }
+
     pub fn set_active(&mut self, active: bool, cx: &mut Context<Self>) {
         if active != self.active {
             self.active = active;
@@ -407,34 +421,28 @@ impl CodegenAlternative {
 
         self.edit_position = Some(self.range.start.bias_right(&self.snapshot));
 
-        let api_key = model.api_key(cx);
-        let telemetry_id = model.telemetry_id();
-        let provider_id = model.provider_id();
-
         if Self::use_streaming_tools(model.as_ref(), cx) {
             let request = self.build_request(&model, user_prompt, context_task, cx)?;
-            let completion_events =
-                cx.spawn(async move |_, cx| model.stream_completion(request.await, cx).await);
-            self.generation = self.handle_completion(
-                telemetry_id,
-                provider_id.to_string(),
-                api_key,
-                completion_events,
-                cx,
-            );
+            let completion_events = cx.spawn({
+                let model = model.clone();
+                async move |_, cx| model.stream_completion(request.await, cx).await
+            });
+            self.generation = self.handle_completion(model, completion_events, cx);
         } else {
             let stream: LocalBoxFuture<Result<LanguageModelTextStream>> =
                 if user_prompt.trim().to_lowercase() == "delete" {
                     async { Ok(LanguageModelTextStream::default()) }.boxed_local()
                 } else {
                     let request = self.build_request(&model, user_prompt, context_task, cx)?;
-                    cx.spawn(async move |_, cx| {
-                        Ok(model.stream_completion_text(request.await, cx).await?)
+                    cx.spawn({
+                        let model = model.clone();
+                        async move |_, cx| {
+                            Ok(model.stream_completion_text(request.await, cx).await?)
+                        }
                     })
                     .boxed_local()
                 };
-            self.generation =
-                self.handle_stream(telemetry_id, provider_id.to_string(), api_key, stream, cx);
+            self.generation = self.handle_stream(model, stream, cx);
         }
 
         Ok(())
@@ -621,12 +629,14 @@ impl CodegenAlternative {
 
     pub fn handle_stream(
         &mut self,
-        model_telemetry_id: String,
-        model_provider_id: String,
-        model_api_key: Option<String>,
+        model: Arc<dyn LanguageModel>,
         stream: impl 'static + Future<Output = Result<LanguageModelTextStream>>,
         cx: &mut Context<Self>,
     ) -> Task<()> {
+        let anthropic_reporter = language_model::AnthropicEventReporter::new(&model, cx);
+        let session_id = self.session_id;
+        let model_telemetry_id = model.telemetry_id();
+        let model_provider_id = model.provider_id().to_string();
         let start_time = Instant::now();
 
         // Make a new snapshot and re-resolve anchor in case the document was modified.
@@ -664,8 +674,6 @@ impl CodegenAlternative {
             }
         }
 
-        let http_client = cx.http_client();
-        let telemetry = self.telemetry.clone();
         let language_name = {
             let multibuffer = self.buffer.read(cx);
             let snapshot = multibuffer.snapshot(cx);
@@ -698,10 +706,11 @@ impl CodegenAlternative {
                 let model_telemetry_id = model_telemetry_id.clone();
                 let model_provider_id = model_provider_id.clone();
                 let (mut diff_tx, mut diff_rx) = mpsc::channel(1);
-                let executor = cx.background_executor().clone();
                 let message_id = message_id.clone();
-                let line_based_stream_diff: Task<anyhow::Result<()>> =
-                    cx.background_spawn(async move {
+                let line_based_stream_diff: Task<anyhow::Result<()>> = cx.background_spawn({
+                    let anthropic_reporter = anthropic_reporter.clone();
+                    let language_name = language_name.clone();
+                    async move {
                         let mut response_latency = None;
                         let request_start = Instant::now();
                         let diff = async {
@@ -798,27 +807,30 @@ impl CodegenAlternative {
                         let result = diff.await;
 
                         let error_message = result.as_ref().err().map(|error| error.to_string());
-                        report_assistant_event(
-                            AssistantEventData {
-                                conversation_id: None,
-                                message_id,
-                                kind: AssistantKind::Inline,
-                                phase: AssistantPhase::Response,
-                                model: model_telemetry_id,
-                                model_provider: model_provider_id,
-                                response_latency,
-                                error_message,
-                                language_name: language_name.map(|name| name.to_proto()),
-                            },
-                            telemetry,
-                            http_client,
-                            model_api_key,
-                            &executor,
+                        telemetry::event!(
+                            "Assistant Responded",
+                            kind = "inline",
+                            phase = "response",
+                            session_id = session_id.to_string(),
+                            model = model_telemetry_id,
+                            model_provider = model_provider_id,
+                            language_name = language_name.as_ref().map(|n| n.to_string()),
+                            message_id = message_id.as_deref(),
+                            response_latency = response_latency,
+                            error_message = error_message.as_deref(),
                         );
 
+                        anthropic_reporter.report(language_model::AnthropicEventData {
+                            completion_type: language_model::AnthropicCompletionType::Editor,
+                            event: language_model::AnthropicEventType::Response,
+                            language_name: language_name.map(|n| n.to_string()),
+                            message_id,
+                        });
+
                         result?;
                         Ok(())
-                    });
+                    }
+                });
 
                 while let Some((char_ops, line_ops)) = diff_rx.next().await {
                     codegen.update(cx, |codegen, cx| {
@@ -1086,9 +1098,7 @@ impl CodegenAlternative {
 
     fn handle_completion(
         &mut self,
-        telemetry_id: String,
-        provider_id: String,
-        api_key: Option<String>,
+        model: Arc<dyn LanguageModel>,
         completion_stream: Task<
             Result<
                 BoxStream<
@@ -1270,13 +1280,7 @@ impl CodegenAlternative {
 
             let Some(task) = codegen
                 .update(cx, move |codegen, cx| {
-                    codegen.handle_stream(
-                        telemetry_id,
-                        provider_id,
-                        api_key,
-                        async { Ok(language_model_text_stream) },
-                        cx,
-                    )
+                    codegen.handle_stream(model, async { Ok(language_model_text_stream) }, cx)
                 })
                 .ok()
             else {
@@ -1448,6 +1452,7 @@ mod tests {
     use gpui::TestAppContext;
     use indoc::indoc;
     use language::{Buffer, Point};
+    use language_model::fake_provider::FakeLanguageModel;
     use language_model::{LanguageModelRegistry, TokenUsage};
     use languages::rust_lang;
     use rand::prelude::*;
@@ -1478,8 +1483,8 @@ mod tests {
                 buffer.clone(),
                 range.clone(),
                 true,
-                None,
                 prompt_builder,
+                Uuid::new_v4(),
                 cx,
             )
         });
@@ -1540,8 +1545,8 @@ mod tests {
                 buffer.clone(),
                 range.clone(),
                 true,
-                None,
                 prompt_builder,
+                Uuid::new_v4(),
                 cx,
             )
         });
@@ -1604,8 +1609,8 @@ mod tests {
                 buffer.clone(),
                 range.clone(),
                 true,
-                None,
                 prompt_builder,
+                Uuid::new_v4(),
                 cx,
             )
         });
@@ -1668,8 +1673,8 @@ mod tests {
                 buffer.clone(),
                 range.clone(),
                 true,
-                None,
                 prompt_builder,
+                Uuid::new_v4(),
                 cx,
             )
         });
@@ -1720,8 +1725,8 @@ mod tests {
                 buffer.clone(),
                 range.clone(),
                 false,
-                None,
                 prompt_builder,
+                Uuid::new_v4(),
                 cx,
             )
         });
@@ -1810,11 +1815,10 @@ mod tests {
         cx: &mut TestAppContext,
     ) -> mpsc::UnboundedSender<String> {
         let (chunks_tx, chunks_rx) = mpsc::unbounded();
+        let model = Arc::new(FakeLanguageModel::default());
         codegen.update(cx, |codegen, cx| {
             codegen.generation = codegen.handle_stream(
-                String::new(),
-                String::new(),
-                None,
+                model,
                 future::ready(Ok(LanguageModelTextStream {
                     message_id: None,
                     stream: chunks_rx.map(Ok).boxed(),

crates/agent_ui/src/inline_assistant.rs 🔗

@@ -1,8 +1,11 @@
+use language_model::AnthropicEventData;
+use language_model::report_anthropic_event;
 use std::cmp;
 use std::mem;
 use std::ops::Range;
 use std::rc::Rc;
 use std::sync::Arc;
+use uuid::Uuid;
 
 use crate::context::load_context;
 use crate::mention_set::MentionSet;
@@ -15,7 +18,6 @@ use crate::{
 use agent::HistoryStore;
 use agent_settings::AgentSettings;
 use anyhow::{Context as _, Result};
-use client::telemetry::Telemetry;
 use collections::{HashMap, HashSet, VecDeque, hash_map};
 use editor::EditorSnapshot;
 use editor::MultiBufferOffset;
@@ -38,15 +40,13 @@ use gpui::{
     WeakEntity, Window, point,
 };
 use language::{Buffer, Point, Selection, TransactionId};
-use language_model::{
-    ConfigurationError, ConfiguredModel, LanguageModelRegistry, report_assistant_event,
-};
+use language_model::{ConfigurationError, ConfiguredModel, LanguageModelRegistry};
 use multi_buffer::MultiBufferRow;
 use parking_lot::Mutex;
 use project::{CodeAction, DisableAiSettings, LspAction, Project, ProjectTransaction};
 use prompt_store::{PromptBuilder, PromptStore};
 use settings::{Settings, SettingsStore};
-use telemetry_events::{AssistantEventData, AssistantKind, AssistantPhase};
+
 use terminal_view::{TerminalView, terminal_panel::TerminalPanel};
 use text::{OffsetRangeExt, ToPoint as _};
 use ui::prelude::*;
@@ -54,13 +54,8 @@ use util::{RangeExt, ResultExt, maybe};
 use workspace::{ItemHandle, Toast, Workspace, dock::Panel, notifications::NotificationId};
 use zed_actions::agent::OpenSettings;
 
-pub fn init(
-    fs: Arc<dyn Fs>,
-    prompt_builder: Arc<PromptBuilder>,
-    telemetry: Arc<Telemetry>,
-    cx: &mut App,
-) {
-    cx.set_global(InlineAssistant::new(fs, prompt_builder, telemetry));
+pub fn init(fs: Arc<dyn Fs>, prompt_builder: Arc<PromptBuilder>, cx: &mut App) {
+    cx.set_global(InlineAssistant::new(fs, prompt_builder));
 
     cx.observe_global::<SettingsStore>(|cx| {
         if DisableAiSettings::get_global(cx).disable_ai {
@@ -100,7 +95,6 @@ pub struct InlineAssistant {
     confirmed_assists: HashMap<InlineAssistId, Entity<CodegenAlternative>>,
     prompt_history: VecDeque<String>,
     prompt_builder: Arc<PromptBuilder>,
-    telemetry: Arc<Telemetry>,
     fs: Arc<dyn Fs>,
     _inline_assistant_completions: Option<mpsc::UnboundedSender<anyhow::Result<InlineAssistId>>>,
 }
@@ -108,11 +102,7 @@ pub struct InlineAssistant {
 impl Global for InlineAssistant {}
 
 impl InlineAssistant {
-    pub fn new(
-        fs: Arc<dyn Fs>,
-        prompt_builder: Arc<PromptBuilder>,
-        telemetry: Arc<Telemetry>,
-    ) -> Self {
+    pub fn new(fs: Arc<dyn Fs>, prompt_builder: Arc<PromptBuilder>) -> Self {
         Self {
             next_assist_id: InlineAssistId::default(),
             next_assist_group_id: InlineAssistGroupId::default(),
@@ -122,7 +112,6 @@ impl InlineAssistant {
             confirmed_assists: HashMap::default(),
             prompt_history: VecDeque::default(),
             prompt_builder,
-            telemetry,
             fs,
             _inline_assistant_completions: None,
         }
@@ -457,17 +446,25 @@ impl InlineAssistant {
             codegen_ranges.push(anchor_range);
 
             if let Some(model) = LanguageModelRegistry::read_global(cx).inline_assistant_model() {
-                self.telemetry.report_assistant_event(AssistantEventData {
-                    conversation_id: None,
-                    kind: AssistantKind::Inline,
-                    phase: AssistantPhase::Invoked,
-                    message_id: None,
-                    model: model.model.telemetry_id(),
-                    model_provider: model.provider.id().to_string(),
-                    response_latency: None,
-                    error_message: None,
-                    language_name: buffer.language().map(|language| language.name().to_proto()),
-                });
+                telemetry::event!(
+                    "Assistant Invoked",
+                    kind = "inline",
+                    phase = "invoked",
+                    model = model.model.telemetry_id(),
+                    model_provider = model.provider.id().to_string(),
+                    language_name = buffer.language().map(|language| language.name().to_proto())
+                );
+
+                report_anthropic_event(
+                    &model.model,
+                    AnthropicEventData {
+                        completion_type: language_model::AnthropicCompletionType::Editor,
+                        event: language_model::AnthropicEventType::Invoked,
+                        language_name: buffer.language().map(|language| language.name().to_proto()),
+                        message_id: None,
+                    },
+                    cx,
+                );
             }
         }
 
@@ -491,6 +488,7 @@ impl InlineAssistant {
         let snapshot = editor.update(cx, |editor, cx| editor.snapshot(window, cx));
 
         let assist_group_id = self.next_assist_group_id.post_inc();
+        let session_id = Uuid::new_v4();
         let prompt_buffer = cx.new(|cx| {
             MultiBuffer::singleton(
                 cx.new(|cx| Buffer::local(initial_prompt.unwrap_or_default(), cx)),
@@ -508,7 +506,7 @@ impl InlineAssistant {
                     editor.read(cx).buffer().clone(),
                     range.clone(),
                     initial_transaction_id,
-                    self.telemetry.clone(),
+                    session_id,
                     self.prompt_builder.clone(),
                     cx,
                 )
@@ -522,6 +520,7 @@ impl InlineAssistant {
                     self.prompt_history.clone(),
                     prompt_buffer.clone(),
                     codegen.clone(),
+                    session_id,
                     self.fs.clone(),
                     thread_store.clone(),
                     prompt_store.clone(),
@@ -1069,8 +1068,6 @@ impl InlineAssistant {
             }
 
             let active_alternative = assist.codegen.read(cx).active_alternative().clone();
-            let message_id = active_alternative.read(cx).message_id.clone();
-
             if let Some(model) = LanguageModelRegistry::read_global(cx).inline_assistant_model() {
                 let language_name = assist.editor.upgrade().and_then(|editor| {
                     let multibuffer = editor.read(cx).buffer().read(cx);
@@ -1079,28 +1076,49 @@ impl InlineAssistant {
                     ranges
                         .first()
                         .and_then(|(buffer, _, _)| buffer.language())
-                        .map(|language| language.name())
+                        .map(|language| language.name().0.to_string())
                 });
-                report_assistant_event(
-                    AssistantEventData {
-                        conversation_id: None,
-                        kind: AssistantKind::Inline,
+
+                let codegen = assist.codegen.read(cx);
+                let session_id = codegen.session_id();
+                let message_id = active_alternative.read(cx).message_id.clone();
+                let model_telemetry_id = model.model.telemetry_id();
+                let model_provider_id = model.model.provider_id().to_string();
+
+                let (phase, event_type, anthropic_event_type) = if undo {
+                    (
+                        "rejected",
+                        "Assistant Response Rejected",
+                        language_model::AnthropicEventType::Reject,
+                    )
+                } else {
+                    (
+                        "accepted",
+                        "Assistant Response Accepted",
+                        language_model::AnthropicEventType::Accept,
+                    )
+                };
+
+                telemetry::event!(
+                    event_type,
+                    phase,
+                    session_id = session_id.to_string(),
+                    kind = "inline",
+                    model = model_telemetry_id,
+                    model_provider = model_provider_id,
+                    language_name = language_name,
+                    message_id = message_id.as_deref(),
+                );
+
+                report_anthropic_event(
+                    &model.model,
+                    language_model::AnthropicEventData {
+                        completion_type: language_model::AnthropicCompletionType::Editor,
+                        event: anthropic_event_type,
+                        language_name,
                         message_id,
-                        phase: if undo {
-                            AssistantPhase::Rejected
-                        } else {
-                            AssistantPhase::Accepted
-                        },
-                        model: model.model.telemetry_id(),
-                        model_provider: model.model.provider_id().to_string(),
-                        response_latency: None,
-                        error_message: None,
-                        language_name: language_name.map(|name| name.to_proto()),
                     },
-                    Some(self.telemetry.clone()),
-                    cx.http_client(),
-                    model.model.api_key(cx),
-                    cx.background_executor(),
+                    cx,
                 );
             }
 
@@ -2036,8 +2054,7 @@ pub mod test {
             cx.set_http_client(http);
             Client::production(cx)
         });
-        let mut inline_assistant =
-            InlineAssistant::new(fs.clone(), prompt_builder, client.telemetry().clone());
+        let mut inline_assistant = InlineAssistant::new(fs.clone(), prompt_builder);
 
         let (tx, mut completion_rx) = mpsc::unbounded();
         inline_assistant.set_completion_receiver(tx);

crates/agent_ui/src/inline_prompt_editor.rs 🔗

@@ -8,7 +8,7 @@ use editor::{
     ContextMenuOptions, Editor, EditorElement, EditorEvent, EditorMode, EditorStyle, MultiBuffer,
     actions::{MoveDown, MoveUp},
 };
-use feature_flags::{FeatureFlag, FeatureFlagAppExt};
+use feature_flags::{FeatureFlagAppExt, InlineAssistantUseToolFeatureFlag};
 use fs::Fs;
 use gpui::{
     AnyElement, App, ClipboardItem, Context, Entity, EventEmitter, FocusHandle, Focusable,
@@ -20,10 +20,10 @@ use parking_lot::Mutex;
 use project::Project;
 use prompt_store::PromptStore;
 use settings::Settings;
+use std::cmp;
 use std::ops::Range;
 use std::rc::Rc;
 use std::sync::Arc;
-use std::{cmp, mem};
 use theme::ThemeSettings;
 use ui::utils::WithRemSize;
 use ui::{IconButtonShape, KeyBinding, PopoverMenuHandle, Tooltip, prelude::*};
@@ -44,54 +44,15 @@ use crate::{CycleNextInlineAssist, CyclePreviousInlineAssist, ModelUsageContext}
 
 actions!(inline_assistant, [ThumbsUpResult, ThumbsDownResult]);
 
-pub struct InlineAssistRatingFeatureFlag;
-
-impl FeatureFlag for InlineAssistRatingFeatureFlag {
-    const NAME: &'static str = "inline-assist-rating";
-
-    fn enabled_for_staff() -> bool {
-        false
-    }
-}
-
-enum RatingState {
+enum CompletionState {
     Pending,
-    GeneratedCompletion(Option<String>),
-    Rated(Uuid),
+    Generated { completion_text: Option<String> },
+    Rated,
 }
 
-impl RatingState {
-    fn is_pending(&self) -> bool {
-        matches!(self, RatingState::Pending)
-    }
-
-    fn rating_id(&self) -> Option<Uuid> {
-        match self {
-            RatingState::Pending => None,
-            RatingState::GeneratedCompletion(_) => None,
-            RatingState::Rated(id) => Some(*id),
-        }
-    }
-
-    fn rate(&mut self) -> (Uuid, Option<String>) {
-        let id = Uuid::new_v4();
-        let old_state = mem::replace(self, RatingState::Rated(id));
-        let completion = match old_state {
-            RatingState::Pending => None,
-            RatingState::GeneratedCompletion(completion) => completion,
-            RatingState::Rated(_) => None,
-        };
-
-        (id, completion)
-    }
-
-    fn reset(&mut self) {
-        *self = RatingState::Pending;
-    }
-
-    fn generated_completion(&mut self, generated_completion: Option<String>) {
-        *self = RatingState::GeneratedCompletion(generated_completion);
-    }
+struct SessionState {
+    session_id: Uuid,
+    completion: CompletionState,
 }
 
 pub struct PromptEditor<T> {
@@ -109,7 +70,7 @@ pub struct PromptEditor<T> {
     _codegen_subscription: Subscription,
     editor_subscriptions: Vec<Subscription>,
     show_rate_limit_notice: bool,
-    rated: RatingState,
+    session_state: SessionState,
     _phantom: std::marker::PhantomData<T>,
 }
 
@@ -487,7 +448,7 @@ impl<T: 'static> PromptEditor<T> {
                 }
 
                 self.edited_since_done = true;
-                self.rated.reset();
+                self.session_state.completion = CompletionState::Pending;
                 cx.notify();
             }
             EditorEvent::Blurred => {
@@ -559,109 +520,165 @@ impl<T: 'static> PromptEditor<T> {
     fn confirm(&mut self, _: &menu::Confirm, _window: &mut Window, cx: &mut Context<Self>) {
         match self.codegen_status(cx) {
             CodegenStatus::Idle => {
+                self.fire_started_telemetry(cx);
                 cx.emit(PromptEditorEvent::StartRequested);
             }
             CodegenStatus::Pending => {}
             CodegenStatus::Done => {
                 if self.edited_since_done {
+                    self.fire_started_telemetry(cx);
                     cx.emit(PromptEditorEvent::StartRequested);
                 } else {
                     cx.emit(PromptEditorEvent::ConfirmRequested { execute: false });
                 }
             }
             CodegenStatus::Error(_) => {
+                self.fire_started_telemetry(cx);
                 cx.emit(PromptEditorEvent::StartRequested);
             }
         }
     }
 
-    fn thumbs_up(&mut self, _: &ThumbsUpResult, _window: &mut Window, cx: &mut Context<Self>) {
-        if self.rated.is_pending() {
-            self.toast("Still generating...", None, cx);
+    fn fire_started_telemetry(&self, cx: &Context<Self>) {
+        let Some(model) = LanguageModelRegistry::read_global(cx).inline_assistant_model() else {
             return;
-        }
-
-        if let Some(rating_id) = self.rated.rating_id() {
-            self.toast("Already rated this completion", Some(rating_id), cx);
-            return;
-        }
+        };
 
-        let (rating_id, completion) = self.rated.rate();
+        let model_telemetry_id = model.model.telemetry_id();
+        let model_provider_id = model.provider.id().to_string();
 
-        let selected_text = match &self.mode {
+        let (kind, language_name) = match &self.mode {
             PromptEditorMode::Buffer { codegen, .. } => {
-                codegen.read(cx).selected_text(cx).map(|s| s.to_string())
+                let codegen = codegen.read(cx);
+                (
+                    "inline",
+                    codegen.language_name(cx).map(|name| name.to_string()),
+                )
             }
-            PromptEditorMode::Terminal { .. } => None,
+            PromptEditorMode::Terminal { .. } => ("inline_terminal", None),
         };
 
-        let model_info = self.model_selector.read(cx).active_model(cx);
-        let model_id = {
-            let Some(configured_model) = model_info else {
-                self.toast("No configured model", None, cx);
-                return;
-            };
+        telemetry::event!(
+            "Assistant Started",
+            session_id = self.session_state.session_id.to_string(),
+            kind = kind,
+            phase = "started",
+            model = model_telemetry_id,
+            model_provider = model_provider_id,
+            language_name = language_name,
+        );
+    }
 
-            configured_model.model.telemetry_id()
-        };
+    fn thumbs_up(&mut self, _: &ThumbsUpResult, _window: &mut Window, cx: &mut Context<Self>) {
+        match &self.session_state.completion {
+            CompletionState::Pending => {
+                self.toast("Can't rate, still generating...", None, cx);
+                return;
+            }
+            CompletionState::Rated => {
+                self.toast(
+                    "Already rated this completion",
+                    Some(self.session_state.session_id),
+                    cx,
+                );
+                return;
+            }
+            CompletionState::Generated { completion_text } => {
+                let model_info = self.model_selector.read(cx).active_model(cx);
+                let model_id = {
+                    let Some(configured_model) = model_info else {
+                        self.toast("No configured model", None, cx);
+                        return;
+                    };
+                    configured_model.model.telemetry_id()
+                };
 
-        let prompt = self.editor.read(cx).text(cx);
+                let selected_text = match &self.mode {
+                    PromptEditorMode::Buffer { codegen, .. } => {
+                        codegen.read(cx).selected_text(cx).map(|s| s.to_string())
+                    }
+                    PromptEditorMode::Terminal { .. } => None,
+                };
 
-        telemetry::event!(
-            "Inline Assistant Rated",
-            rating = "positive",
-            model = model_id,
-            prompt = prompt,
-            completion = completion,
-            selected_text = selected_text,
-            rating_id = rating_id.to_string()
-        );
+                let prompt = self.editor.read(cx).text(cx);
 
-        cx.notify();
-    }
+                let kind = match &self.mode {
+                    PromptEditorMode::Buffer { .. } => "inline",
+                    PromptEditorMode::Terminal { .. } => "inline_terminal",
+                };
 
-    fn thumbs_down(&mut self, _: &ThumbsDownResult, _window: &mut Window, cx: &mut Context<Self>) {
-        if self.rated.is_pending() {
-            self.toast("Still generating...", None, cx);
-            return;
-        }
-        if let Some(rating_id) = self.rated.rating_id() {
-            self.toast("Already rated this completion", Some(rating_id), cx);
-            return;
-        }
+                telemetry::event!(
+                    "Inline Assistant Rated",
+                    rating = "positive",
+                    session_id = self.session_state.session_id.to_string(),
+                    kind = kind,
+                    model = model_id,
+                    prompt = prompt,
+                    completion = completion_text,
+                    selected_text = selected_text,
+                );
 
-        let (rating_id, completion) = self.rated.rate();
+                self.session_state.completion = CompletionState::Rated;
 
-        let selected_text = match &self.mode {
-            PromptEditorMode::Buffer { codegen, .. } => {
-                codegen.read(cx).selected_text(cx).map(|s| s.to_string())
+                cx.notify();
             }
-            PromptEditorMode::Terminal { .. } => None,
-        };
+        }
+    }
 
-        let model_info = self.model_selector.read(cx).active_model(cx);
-        let model_telemetry_id = {
-            let Some(configured_model) = model_info else {
-                self.toast("No configured model", None, cx);
+    fn thumbs_down(&mut self, _: &ThumbsDownResult, _window: &mut Window, cx: &mut Context<Self>) {
+        match &self.session_state.completion {
+            CompletionState::Pending => {
+                self.toast("Can't rate, still generating...", None, cx);
                 return;
-            };
+            }
+            CompletionState::Rated => {
+                self.toast(
+                    "Already rated this completion",
+                    Some(self.session_state.session_id),
+                    cx,
+                );
+                return;
+            }
+            CompletionState::Generated { completion_text } => {
+                let model_info = self.model_selector.read(cx).active_model(cx);
+                let model_telemetry_id = {
+                    let Some(configured_model) = model_info else {
+                        self.toast("No configured model", None, cx);
+                        return;
+                    };
+                    configured_model.model.telemetry_id()
+                };
 
-            configured_model.model.telemetry_id()
-        };
+                let selected_text = match &self.mode {
+                    PromptEditorMode::Buffer { codegen, .. } => {
+                        codegen.read(cx).selected_text(cx).map(|s| s.to_string())
+                    }
+                    PromptEditorMode::Terminal { .. } => None,
+                };
 
-        let prompt = self.editor.read(cx).text(cx);
+                let prompt = self.editor.read(cx).text(cx);
 
-        telemetry::event!(
-            "Inline Assistant Rated",
-            rating = "negative",
-            model = model_telemetry_id,
-            prompt = prompt,
-            completion = completion,
-            selected_text = selected_text,
-            rating_id = rating_id.to_string()
-        );
+                let kind = match &self.mode {
+                    PromptEditorMode::Buffer { .. } => "inline",
+                    PromptEditorMode::Terminal { .. } => "inline_terminal",
+                };
+
+                telemetry::event!(
+                    "Inline Assistant Rated",
+                    rating = "negative",
+                    session_id = self.session_state.session_id.to_string(),
+                    kind = kind,
+                    model = model_telemetry_id,
+                    prompt = prompt,
+                    completion = completion_text,
+                    selected_text = selected_text,
+                );
+
+                self.session_state.completion = CompletionState::Rated;
 
-        cx.notify();
+                cx.notify();
+            }
+        }
     }
 
     fn toast(&mut self, msg: &str, uuid: Option<Uuid>, cx: &mut Context<'_, PromptEditor<T>>) {
@@ -795,8 +812,8 @@ impl<T: 'static> PromptEditor<T> {
                             .into_any_element(),
                     ]
                 } else {
-                    let show_rating_buttons = cx.has_flag::<InlineAssistRatingFeatureFlag>();
-                    let rated = self.rated.rating_id().is_some();
+                    let show_rating_buttons = cx.has_flag::<InlineAssistantUseToolFeatureFlag>();
+                    let rated = matches!(self.session_state.completion, CompletionState::Rated);
 
                     let accept = IconButton::new("accept", IconName::Check)
                         .icon_color(Color::Info)
@@ -1120,6 +1137,7 @@ impl PromptEditor<BufferCodegen> {
         prompt_history: VecDeque<String>,
         prompt_buffer: Entity<MultiBuffer>,
         codegen: Entity<BufferCodegen>,
+        session_id: Uuid,
         fs: Arc<dyn Fs>,
         history_store: Entity<HistoryStore>,
         prompt_store: Option<Entity<PromptStore>>,
@@ -1190,7 +1208,10 @@ impl PromptEditor<BufferCodegen> {
             editor_subscriptions: Vec::new(),
             show_rate_limit_notice: false,
             mode,
-            rated: RatingState::Pending,
+            session_state: SessionState {
+                session_id,
+                completion: CompletionState::Pending,
+            },
             _phantom: Default::default(),
         };
 
@@ -1210,13 +1231,15 @@ impl PromptEditor<BufferCodegen> {
                     .update(cx, |editor, _| editor.set_read_only(false));
             }
             CodegenStatus::Pending => {
-                self.rated.reset();
+                self.session_state.completion = CompletionState::Pending;
                 self.editor
                     .update(cx, |editor, _| editor.set_read_only(true));
             }
             CodegenStatus::Done => {
                 let completion = codegen.read(cx).active_completion(cx);
-                self.rated.generated_completion(completion);
+                self.session_state.completion = CompletionState::Generated {
+                    completion_text: completion,
+                };
                 self.edited_since_done = false;
                 self.editor
                     .update(cx, |editor, _| editor.set_read_only(false));
@@ -1272,6 +1295,7 @@ impl PromptEditor<TerminalCodegen> {
         prompt_history: VecDeque<String>,
         prompt_buffer: Entity<MultiBuffer>,
         codegen: Entity<TerminalCodegen>,
+        session_id: Uuid,
         fs: Arc<dyn Fs>,
         history_store: Entity<HistoryStore>,
         prompt_store: Option<Entity<PromptStore>>,
@@ -1337,7 +1361,10 @@ impl PromptEditor<TerminalCodegen> {
             editor_subscriptions: Vec::new(),
             mode,
             show_rate_limit_notice: false,
-            rated: RatingState::Pending,
+            session_state: SessionState {
+                session_id,
+                completion: CompletionState::Pending,
+            },
             _phantom: Default::default(),
         };
         this.count_lines(cx);
@@ -1377,13 +1404,14 @@ impl PromptEditor<TerminalCodegen> {
                     .update(cx, |editor, _| editor.set_read_only(false));
             }
             CodegenStatus::Pending => {
-                self.rated = RatingState::Pending;
+                self.session_state.completion = CompletionState::Pending;
                 self.editor
                     .update(cx, |editor, _| editor.set_read_only(true));
             }
             CodegenStatus::Done | CodegenStatus::Error(_) => {
-                self.rated
-                    .generated_completion(codegen.read(cx).completion());
+                self.session_state.completion = CompletionState::Generated {
+                    completion_text: codegen.read(cx).completion(),
+                };
                 self.edited_since_done = false;
                 self.editor
                     .update(cx, |editor, _| editor.set_read_only(false));

crates/agent_ui/src/terminal_codegen.rs 🔗

@@ -1,37 +1,38 @@
 use crate::inline_prompt_editor::CodegenStatus;
-use client::telemetry::Telemetry;
 use futures::{SinkExt, StreamExt, channel::mpsc};
 use gpui::{App, AppContext as _, Context, Entity, EventEmitter, Task};
-use language_model::{
-    ConfiguredModel, LanguageModelRegistry, LanguageModelRequest, report_assistant_event,
-};
-use std::{sync::Arc, time::Instant};
-use telemetry_events::{AssistantEventData, AssistantKind, AssistantPhase};
+use language_model::{ConfiguredModel, LanguageModelRegistry, LanguageModelRequest};
+use std::time::Instant;
 use terminal::Terminal;
+use uuid::Uuid;
 
 pub struct TerminalCodegen {
     pub status: CodegenStatus,
-    pub telemetry: Option<Arc<Telemetry>>,
     terminal: Entity<Terminal>,
     generation: Task<()>,
     pub message_id: Option<String>,
     transaction: Option<TerminalTransaction>,
+    session_id: Uuid,
 }
 
 impl EventEmitter<CodegenEvent> for TerminalCodegen {}
 
 impl TerminalCodegen {
-    pub fn new(terminal: Entity<Terminal>, telemetry: Option<Arc<Telemetry>>) -> Self {
+    pub fn new(terminal: Entity<Terminal>, session_id: Uuid) -> Self {
         Self {
             terminal,
-            telemetry,
             status: CodegenStatus::Idle,
             generation: Task::ready(()),
             message_id: None,
             transaction: None,
+            session_id,
         }
     }
 
+    pub fn session_id(&self) -> Uuid {
+        self.session_id
+    }
+
     pub fn start(&mut self, prompt_task: Task<LanguageModelRequest>, cx: &mut Context<Self>) {
         let Some(ConfiguredModel { model, .. }) =
             LanguageModelRegistry::read_global(cx).inline_assistant_model()
@@ -39,15 +40,15 @@ impl TerminalCodegen {
             return;
         };
 
-        let model_api_key = model.api_key(cx);
-        let http_client = cx.http_client();
-        let telemetry = self.telemetry.clone();
+        let anthropic_reporter = language_model::AnthropicEventReporter::new(&model, cx);
+        let session_id = self.session_id;
+        let model_telemetry_id = model.telemetry_id();
+        let model_provider_id = model.provider_id().to_string();
+
         self.status = CodegenStatus::Pending;
         self.transaction = Some(TerminalTransaction::start(self.terminal.clone()));
         self.generation = cx.spawn(async move |this, cx| {
             let prompt = prompt_task.await;
-            let model_telemetry_id = model.telemetry_id();
-            let model_provider_id = model.provider_id();
             let response = model.stream_completion_text(prompt, cx).await;
             let generate = async {
                 let message_id = response
@@ -59,7 +60,7 @@ impl TerminalCodegen {
 
                 let task = cx.background_spawn({
                     let message_id = message_id.clone();
-                    let executor = cx.background_executor().clone();
+                    let anthropic_reporter = anthropic_reporter.clone();
                     async move {
                         let mut response_latency = None;
                         let request_start = Instant::now();
@@ -79,24 +80,27 @@ impl TerminalCodegen {
                         let result = task.await;
 
                         let error_message = result.as_ref().err().map(|error| error.to_string());
-                        report_assistant_event(
-                            AssistantEventData {
-                                conversation_id: None,
-                                kind: AssistantKind::InlineTerminal,
-                                message_id,
-                                phase: AssistantPhase::Response,
-                                model: model_telemetry_id,
-                                model_provider: model_provider_id.to_string(),
-                                response_latency,
-                                error_message,
-                                language_name: None,
-                            },
-                            telemetry,
-                            http_client,
-                            model_api_key,
-                            &executor,
+
+                        telemetry::event!(
+                            "Assistant Responded",
+                            session_id = session_id.to_string(),
+                            kind = "inline_terminal",
+                            phase = "response",
+                            model = model_telemetry_id,
+                            model_provider = model_provider_id,
+                            language_name = Option::<&str>::None,
+                            message_id = message_id,
+                            response_latency = response_latency,
+                            error_message = error_message,
                         );
 
+                        anthropic_reporter.report(language_model::AnthropicEventData {
+                            completion_type: language_model::AnthropicCompletionType::Terminal,
+                            event: language_model::AnthropicEventType::Response,
+                            language_name: None,
+                            message_id,
+                        });
+
                         result?;
                         anyhow::Ok(())
                     }

crates/agent_ui/src/terminal_inline_assistant.rs 🔗

@@ -8,7 +8,7 @@ use crate::{
 use agent::HistoryStore;
 use agent_settings::AgentSettings;
 use anyhow::{Context as _, Result};
-use client::telemetry::Telemetry;
+
 use cloud_llm_client::CompletionIntent;
 use collections::{HashMap, VecDeque};
 use editor::{MultiBuffer, actions::SelectAll};
@@ -17,24 +17,19 @@ use gpui::{App, Entity, Focusable, Global, Subscription, Task, UpdateGlobal, Wea
 use language::Buffer;
 use language_model::{
     ConfiguredModel, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
-    Role, report_assistant_event,
+    Role, report_anthropic_event,
 };
 use project::Project;
 use prompt_store::{PromptBuilder, PromptStore};
 use std::sync::Arc;
-use telemetry_events::{AssistantEventData, AssistantKind, AssistantPhase};
 use terminal_view::TerminalView;
 use ui::prelude::*;
 use util::ResultExt;
+use uuid::Uuid;
 use workspace::{Toast, Workspace, notifications::NotificationId};
 
-pub fn init(
-    fs: Arc<dyn Fs>,
-    prompt_builder: Arc<PromptBuilder>,
-    telemetry: Arc<Telemetry>,
-    cx: &mut App,
-) {
-    cx.set_global(TerminalInlineAssistant::new(fs, prompt_builder, telemetry));
+pub fn init(fs: Arc<dyn Fs>, prompt_builder: Arc<PromptBuilder>, cx: &mut App) {
+    cx.set_global(TerminalInlineAssistant::new(fs, prompt_builder));
 }
 
 const DEFAULT_CONTEXT_LINES: usize = 50;
@@ -44,7 +39,6 @@ pub struct TerminalInlineAssistant {
     next_assist_id: TerminalInlineAssistId,
     assists: HashMap<TerminalInlineAssistId, TerminalInlineAssist>,
     prompt_history: VecDeque<String>,
-    telemetry: Option<Arc<Telemetry>>,
     fs: Arc<dyn Fs>,
     prompt_builder: Arc<PromptBuilder>,
 }
@@ -52,16 +46,11 @@ pub struct TerminalInlineAssistant {
 impl Global for TerminalInlineAssistant {}
 
 impl TerminalInlineAssistant {
-    pub fn new(
-        fs: Arc<dyn Fs>,
-        prompt_builder: Arc<PromptBuilder>,
-        telemetry: Arc<Telemetry>,
-    ) -> Self {
+    pub fn new(fs: Arc<dyn Fs>, prompt_builder: Arc<PromptBuilder>) -> Self {
         Self {
             next_assist_id: TerminalInlineAssistId::default(),
             assists: HashMap::default(),
             prompt_history: VecDeque::default(),
-            telemetry: Some(telemetry),
             fs,
             prompt_builder,
         }
@@ -80,13 +69,14 @@ impl TerminalInlineAssistant {
     ) {
         let terminal = terminal_view.read(cx).terminal().clone();
         let assist_id = self.next_assist_id.post_inc();
+        let session_id = Uuid::new_v4();
         let prompt_buffer = cx.new(|cx| {
             MultiBuffer::singleton(
                 cx.new(|cx| Buffer::local(initial_prompt.unwrap_or_default(), cx)),
                 cx,
             )
         });
-        let codegen = cx.new(|_| TerminalCodegen::new(terminal, self.telemetry.clone()));
+        let codegen = cx.new(|_| TerminalCodegen::new(terminal, session_id));
 
         let prompt_editor = cx.new(|cx| {
             PromptEditor::new_terminal(
@@ -94,6 +84,7 @@ impl TerminalInlineAssistant {
                 self.prompt_history.clone(),
                 prompt_buffer.clone(),
                 codegen,
+                session_id,
                 self.fs.clone(),
                 thread_store.clone(),
                 prompt_store.clone(),
@@ -309,27 +300,45 @@ impl TerminalInlineAssistant {
                 LanguageModelRegistry::read_global(cx).inline_assistant_model()
             {
                 let codegen = assist.codegen.read(cx);
-                let executor = cx.background_executor().clone();
-                report_assistant_event(
-                    AssistantEventData {
-                        conversation_id: None,
-                        kind: AssistantKind::InlineTerminal,
-                        message_id: codegen.message_id.clone(),
-                        phase: if undo {
-                            AssistantPhase::Rejected
-                        } else {
-                            AssistantPhase::Accepted
-                        },
-                        model: model.telemetry_id(),
-                        model_provider: model.provider_id().to_string(),
-                        response_latency: None,
-                        error_message: None,
+                let session_id = codegen.session_id();
+                let message_id = codegen.message_id.clone();
+                let model_telemetry_id = model.telemetry_id();
+                let model_provider_id = model.provider_id().to_string();
+
+                let (phase, event_type, anthropic_event_type) = if undo {
+                    (
+                        "rejected",
+                        "Assistant Response Rejected",
+                        language_model::AnthropicEventType::Reject,
+                    )
+                } else {
+                    (
+                        "accepted",
+                        "Assistant Response Accepted",
+                        language_model::AnthropicEventType::Accept,
+                    )
+                };
+
+                // Fire Zed telemetry
+                telemetry::event!(
+                    event_type,
+                    kind = "inline_terminal",
+                    phase = phase,
+                    model = model_telemetry_id,
+                    model_provider = model_provider_id,
+                    message_id = message_id,
+                    session_id = session_id.to_string(),
+                );
+
+                report_anthropic_event(
+                    &model,
+                    language_model::AnthropicEventData {
+                        completion_type: language_model::AnthropicCompletionType::Terminal,
+                        event: anthropic_event_type,
                         language_name: None,
+                        message_id,
                     },
-                    codegen.telemetry.clone(),
-                    cx.http_client(),
-                    model.api_key(cx),
-                    &executor,
+                    cx,
                 );
             }
 

crates/assistant_text_thread/Cargo.toml 🔗

@@ -46,7 +46,7 @@ serde_json.workspace = true
 settings.workspace = true
 smallvec.workspace = true
 smol.workspace = true
-telemetry_events.workspace = true
+telemetry.workspace = true
 text.workspace = true
 ui.workspace = true
 util.workspace = true

crates/assistant_text_thread/src/assistant_text_thread_tests.rs 🔗

@@ -50,7 +50,6 @@ fn test_inserting_and_removing_messages(cx: &mut App) {
         TextThread::local(
             registry,
             None,
-            None,
             prompt_builder.clone(),
             Arc::new(SlashCommandWorkingSet::default()),
             cx,
@@ -189,7 +188,6 @@ fn test_message_splitting(cx: &mut App) {
         TextThread::local(
             registry.clone(),
             None,
-            None,
             prompt_builder.clone(),
             Arc::new(SlashCommandWorkingSet::default()),
             cx,
@@ -294,7 +292,6 @@ fn test_messages_for_offsets(cx: &mut App) {
         TextThread::local(
             registry,
             None,
-            None,
             prompt_builder.clone(),
             Arc::new(SlashCommandWorkingSet::default()),
             cx,
@@ -405,7 +402,6 @@ async fn test_slash_commands(cx: &mut TestAppContext) {
         TextThread::local(
             registry.clone(),
             None,
-            None,
             prompt_builder.clone(),
             Arc::new(SlashCommandWorkingSet::default()),
             cx,
@@ -677,7 +673,6 @@ async fn test_serialization(cx: &mut TestAppContext) {
         TextThread::local(
             registry.clone(),
             None,
-            None,
             prompt_builder.clone(),
             Arc::new(SlashCommandWorkingSet::default()),
             cx,
@@ -724,7 +719,6 @@ async fn test_serialization(cx: &mut TestAppContext) {
             prompt_builder.clone(),
             Arc::new(SlashCommandWorkingSet::default()),
             None,
-            None,
             cx,
         )
     });
@@ -780,7 +774,6 @@ async fn test_random_context_collaboration(cx: &mut TestAppContext, mut rng: Std
                 prompt_builder.clone(),
                 Arc::new(SlashCommandWorkingSet::default()),
                 None,
-                None,
                 cx,
             )
         });
@@ -1041,7 +1034,6 @@ fn test_mark_cache_anchors(cx: &mut App) {
         TextThread::local(
             registry,
             None,
-            None,
             prompt_builder.clone(),
             Arc::new(SlashCommandWorkingSet::default()),
             cx,
@@ -1368,7 +1360,6 @@ fn setup_context_editor_with_fake_model(
         TextThread::local(
             registry,
             None,
-            None,
             prompt_builder.clone(),
             Arc::new(SlashCommandWorkingSet::default()),
             cx,

crates/assistant_text_thread/src/text_thread.rs 🔗

@@ -5,7 +5,7 @@ use assistant_slash_command::{
     SlashCommandResult, SlashCommandWorkingSet,
 };
 use assistant_slash_commands::FileCommandMetadata;
-use client::{self, ModelRequestUsage, RequestUsage, proto, telemetry::Telemetry};
+use client::{self, ModelRequestUsage, RequestUsage, proto};
 use clock::ReplicaId;
 use cloud_llm_client::{CompletionIntent, UsageLimit};
 use collections::{HashMap, HashSet};
@@ -19,10 +19,11 @@ use gpui::{
 use itertools::Itertools as _;
 use language::{AnchorRangeExt, Bias, Buffer, LanguageRegistry, OffsetRangeExt, Point, ToOffset};
 use language_model::{
-    LanguageModel, LanguageModelCacheConfiguration, LanguageModelCompletionEvent,
-    LanguageModelImage, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
+    AnthropicCompletionType, AnthropicEventData, AnthropicEventType, LanguageModel,
+    LanguageModelCacheConfiguration, LanguageModelCompletionEvent, LanguageModelImage,
+    LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
     LanguageModelToolUseId, MessageContent, PaymentRequiredError, Role, StopReason,
-    report_assistant_event,
+    report_anthropic_event,
 };
 use open_ai::Model as OpenAiModel;
 use paths::text_threads_dir;
@@ -40,7 +41,7 @@ use std::{
     sync::Arc,
     time::{Duration, Instant},
 };
-use telemetry_events::{AssistantEventData, AssistantKind, AssistantPhase};
+
 use text::{BufferSnapshot, ToPoint};
 use ui::IconName;
 use util::{ResultExt, TryFutureExt, post_inc};
@@ -686,7 +687,6 @@ pub struct TextThread {
     pending_cache_warming_task: Task<Option<()>>,
     path: Option<Arc<Path>>,
     _subscriptions: Vec<Subscription>,
-    telemetry: Option<Arc<Telemetry>>,
     language_registry: Arc<LanguageRegistry>,
     project: Option<WeakEntity<Project>>,
     prompt_builder: Arc<PromptBuilder>,
@@ -709,7 +709,6 @@ impl TextThread {
     pub fn local(
         language_registry: Arc<LanguageRegistry>,
         project: Option<WeakEntity<Project>>,
-        telemetry: Option<Arc<Telemetry>>,
         prompt_builder: Arc<PromptBuilder>,
         slash_commands: Arc<SlashCommandWorkingSet>,
         cx: &mut Context<Self>,
@@ -722,7 +721,6 @@ impl TextThread {
             prompt_builder,
             slash_commands,
             project,
-            telemetry,
             cx,
         )
     }
@@ -743,7 +741,6 @@ impl TextThread {
         prompt_builder: Arc<PromptBuilder>,
         slash_commands: Arc<SlashCommandWorkingSet>,
         project: Option<WeakEntity<Project>>,
-        telemetry: Option<Arc<Telemetry>>,
         cx: &mut Context<Self>,
     ) -> Self {
         let buffer = cx.new(|_cx| {
@@ -784,7 +781,6 @@ impl TextThread {
             completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
             path: None,
             buffer,
-            telemetry,
             project,
             language_registry,
             slash_commands,
@@ -874,7 +870,6 @@ impl TextThread {
         prompt_builder: Arc<PromptBuilder>,
         slash_commands: Arc<SlashCommandWorkingSet>,
         project: Option<WeakEntity<Project>>,
-        telemetry: Option<Arc<Telemetry>>,
         cx: &mut Context<Self>,
     ) -> Self {
         let id = saved_context.id.clone().unwrap_or_else(TextThreadId::new);
@@ -886,7 +881,6 @@ impl TextThread {
             prompt_builder,
             slash_commands,
             project,
-            telemetry,
             cx,
         );
         this.path = Some(path);
@@ -2212,24 +2206,26 @@ impl TextThread {
                         .read(cx)
                         .language()
                         .map(|language| language.name());
-                    report_assistant_event(
-                        AssistantEventData {
-                            conversation_id: Some(this.id.0.clone()),
-                            kind: AssistantKind::Panel,
-                            phase: AssistantPhase::Response,
-                            message_id: None,
-                            model: model.telemetry_id(),
-                            model_provider: model.provider_id().to_string(),
-                            response_latency,
-                            error_message,
-                            language_name: language_name.map(|name| name.to_proto()),
-                        },
-                        this.telemetry.clone(),
-                        cx.http_client(),
-                        model.api_key(cx),
-                        cx.background_executor(),
+
+                    telemetry::event!(
+                        "Assistant Responded",
+                        conversation_id = this.id.0.clone(),
+                        kind = "panel",
+                        phase = "response",
+        model = model.telemetry_id(),
+                        model_provider = model.provider_id().to_string(),
+                        response_latency,
+                        error_message,
+                        language_name = language_name.as_ref().map(|name| name.to_proto()),
                     );
 
+                    report_anthropic_event(&model, AnthropicEventData {
+                        completion_type: AnthropicCompletionType::Panel,
+                        event: AnthropicEventType::Response,
+                        language_name: language_name.map(|name| name.to_proto()),
+                        message_id: None,
+                    }, cx);
+
                     if let Ok(stop_reason) = result {
                         match stop_reason {
                             StopReason::ToolUse => {}

crates/assistant_text_thread/src/text_thread_store.rs 🔗

@@ -4,7 +4,7 @@ use crate::{
 };
 use anyhow::{Context as _, Result};
 use assistant_slash_command::{SlashCommandId, SlashCommandWorkingSet};
-use client::{Client, TypedEnvelope, proto, telemetry::Telemetry};
+use client::{Client, TypedEnvelope, proto};
 use clock::ReplicaId;
 use collections::HashMap;
 use context_server::ContextServerId;
@@ -48,7 +48,6 @@ pub struct TextThreadStore {
     fs: Arc<dyn Fs>,
     languages: Arc<LanguageRegistry>,
     slash_commands: Arc<SlashCommandWorkingSet>,
-    telemetry: Arc<Telemetry>,
     _watch_updates: Task<Option<()>>,
     client: Arc<Client>,
     project: WeakEntity<Project>,
@@ -88,7 +87,6 @@ impl TextThreadStore {
     ) -> Task<Result<Entity<Self>>> {
         let fs = project.read(cx).fs().clone();
         let languages = project.read(cx).languages().clone();
-        let telemetry = project.read(cx).client().telemetry().clone();
         cx.spawn(async move |cx| {
             const CONTEXT_WATCH_DURATION: Duration = Duration::from_millis(100);
             let (mut events, _) = fs.watch(text_threads_dir(), CONTEXT_WATCH_DURATION).await;
@@ -102,7 +100,6 @@ impl TextThreadStore {
                     fs,
                     languages,
                     slash_commands,
-                    telemetry,
                     _watch_updates: cx.spawn(async move |this, cx| {
                         async move {
                             while events.next().await.is_some() {
@@ -143,7 +140,6 @@ impl TextThreadStore {
             fs: project.read(cx).fs().clone(),
             languages: project.read(cx).languages().clone(),
             slash_commands: Arc::default(),
-            telemetry: project.read(cx).client().telemetry().clone(),
             _watch_updates: Task::ready(None),
             client: project.read(cx).client(),
             project: project.downgrade(),
@@ -379,7 +375,6 @@ impl TextThreadStore {
             TextThread::local(
                 self.languages.clone(),
                 Some(self.project.clone()),
-                Some(self.telemetry.clone()),
                 self.prompt_builder.clone(),
                 self.slash_commands.clone(),
                 cx,
@@ -402,7 +397,6 @@
         let capability = project.capability();
         let language_registry = self.languages.clone();
         let project = self.project.clone();
-        let telemetry = self.telemetry.clone();
         let prompt_builder = self.prompt_builder.clone();
         let slash_commands = self.slash_commands.clone();
         let request = self.client.request(proto::CreateContext { project_id });
@@ -419,7 +414,6 @@ impl TextThreadStore {
                     prompt_builder,
                     slash_commands,
                     Some(project),
-                    Some(telemetry),
                     cx,
                 )
             })?;
@@ -457,7 +451,6 @@ impl TextThreadStore {
         let fs = self.fs.clone();
         let languages = self.languages.clone();
         let project = self.project.clone();
-        let telemetry = self.telemetry.clone();
         let load = cx.background_spawn({
             let path = path.clone();
             async move {
@@ -478,7 +471,6 @@ impl TextThreadStore {
                     prompt_builder,
                     slash_commands,
                     Some(project),
-                    Some(telemetry),
                     cx,
                 )
             })?;
@@ -568,7 +560,6 @@ impl TextThreadStore {
         let capability = project.capability();
         let language_registry = self.languages.clone();
         let project = self.project.clone();
-        let telemetry = self.telemetry.clone();
         let request = self.client.request(proto::OpenContext {
             project_id,
             context_id: text_thread_id.to_proto(),
@@ -587,7 +578,6 @@ impl TextThreadStore {
                     prompt_builder,
                     slash_commands,
                     Some(project),
-                    Some(telemetry),
                     cx,
                 )
             })?;

crates/language_model/Cargo.toml 🔗

@@ -39,7 +39,6 @@ serde.workspace = true
 serde_json.workspace = true
 settings.workspace = true
 smol.workspace = true
-telemetry_events.workspace = true
 thiserror.workspace = true
 util.workspace = true
 zed_env_vars.workspace = true

crates/language_model/src/telemetry.rs 🔗

@@ -1,41 +1,101 @@
 use crate::ANTHROPIC_PROVIDER_ID;
 use anthropic::ANTHROPIC_API_URL;
 use anyhow::{Context as _, anyhow};
-use client::telemetry::Telemetry;
 use gpui::BackgroundExecutor;
 use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest};
 use std::env;
 use std::sync::Arc;
-use telemetry_events::{AssistantEventData, AssistantKind, AssistantPhase};
 use util::ResultExt;
 
-pub fn report_assistant_event(
-    event: AssistantEventData,
-    telemetry: Option<Arc<Telemetry>>,
-    client: Arc<dyn HttpClient>,
-    model_api_key: Option<String>,
-    executor: &BackgroundExecutor,
+#[derive(Clone, Debug)]
+pub struct AnthropicEventData {
+    pub completion_type: AnthropicCompletionType,
+    pub event: AnthropicEventType,
+    pub language_name: Option<String>,
+    pub message_id: Option<String>,
+}
+
+#[derive(Clone, Debug)]
+pub enum AnthropicCompletionType {
+    Editor,
+    Terminal,
+    Panel,
+}
+
+#[derive(Clone, Debug)]
+pub enum AnthropicEventType {
+    Invoked,
+    Response,
+    Accept,
+    Reject,
+}
+
+impl AnthropicCompletionType {
+    fn as_str(&self) -> &'static str {
+        match self {
+            Self::Editor => "natural_language_completion_in_editor",
+            Self::Terminal => "natural_language_completion_in_terminal",
+            Self::Panel => "conversation_message",
+        }
+    }
+}
+
+impl AnthropicEventType {
+    fn as_str(&self) -> &'static str {
+        match self {
+            Self::Invoked => "invoke",
+            Self::Response => "response",
+            Self::Accept => "accept",
+            Self::Reject => "reject",
+        }
+    }
+}
+
+pub fn report_anthropic_event(
+    model: &Arc<dyn crate::LanguageModel>,
+    event: AnthropicEventData,
+    cx: &gpui::App,
 ) {
-    if let Some(telemetry) = telemetry.as_ref() {
-        telemetry.report_assistant_event(event.clone());
-        if telemetry.metrics_enabled() && event.model_provider == ANTHROPIC_PROVIDER_ID.0 {
-            if let Some(api_key) = model_api_key {
-                executor
-                    .spawn(async move {
-                        report_anthropic_event(event, client, api_key)
-                            .await
-                            .log_err();
-                    })
-                    .detach();
-            } else {
-                log::error!("Cannot send Anthropic telemetry because API key is missing");
-            }
+    let reporter = AnthropicEventReporter::new(model, cx);
+    reporter.report(event);
+}
+
+#[derive(Clone)]
+pub struct AnthropicEventReporter {
+    http_client: Arc<dyn HttpClient>,
+    executor: BackgroundExecutor,
+    api_key: Option<String>,
+    is_anthropic: bool,
+}
+
+impl AnthropicEventReporter {
+    pub fn new(model: &Arc<dyn crate::LanguageModel>, cx: &gpui::App) -> Self {
+        Self {
+            http_client: cx.http_client(),
+            executor: cx.background_executor().clone(),
+            api_key: model.api_key(cx),
+            is_anthropic: model.provider_id() == ANTHROPIC_PROVIDER_ID,
         }
     }
+
+    pub fn report(&self, event: AnthropicEventData) {
+        if !self.is_anthropic {
+            return;
+        }
+        let Some(api_key) = self.api_key.clone() else {
+            return;
+        };
+        let client = self.http_client.clone();
+        self.executor
+            .spawn(async move {
+                send_anthropic_event(event, client, api_key).await.log_err();
+            })
+            .detach();
+    }
 }
 
-async fn report_anthropic_event(
-    event: AssistantEventData,
+async fn send_anthropic_event(
+    event: AnthropicEventData,
     client: Arc<dyn HttpClient>,
     api_key: String,
 ) -> anyhow::Result<()> {
@@ -45,18 +105,10 @@ async fn report_anthropic_event(
         .uri(uri)
         .header("X-Api-Key", api_key)
         .header("Content-Type", "application/json");
-    let serialized_event: serde_json::Value = serde_json::json!({
-        "completion_type": match event.kind {
-            AssistantKind::Inline => "natural_language_completion_in_editor",
-            AssistantKind::InlineTerminal => "natural_language_completion_in_terminal",
-            AssistantKind::Panel => "conversation_message",
-        },
-        "event": match event.phase {
-            AssistantPhase::Response => "response",
-            AssistantPhase::Invoked => "invoke",
-            AssistantPhase::Accepted => "accept",
-            AssistantPhase::Rejected => "reject",
-        },
+
+    let serialized_event = serde_json::json!({
+        "completion_type": event.completion_type.as_str(),
+        "event": event.event.as_str(),
         "metadata": {
             "language_name": event.language_name,
             "message_id": event.message_id,

crates/settings_ui/src/settings_ui.rs 🔗

@@ -4,7 +4,6 @@ mod pages;
 
 use anyhow::Result;
 use editor::{Editor, EditorEvent};
-use feature_flags::FeatureFlag;
 use fuzzy::StringMatchCandidate;
 use gpui::{
     Action, App, ClipboardItem, DEFAULT_ADDITIONAL_WINDOW_SIZE, Div, Entity, FocusHandle,
@@ -370,12 +369,6 @@ struct SettingsFieldMetadata {
     should_do_titlecase: Option<bool>,
 }
 
-pub struct SettingsUiFeatureFlag;
-
-impl FeatureFlag for SettingsUiFeatureFlag {
-    const NAME: &'static str = "settings-ui";
-}
-
 pub fn init(cx: &mut App) {
     init_renderers(cx);