telemetry: Add `language_name` and `model_provider` (#18640)

Created by Boris Cherny, Marshall Bowers, and Max

This PR adds a bit more metadata for assistant logging.

Release Notes:

- Assistant: Added `language_name` and `model_provider` fields to
telemetry events.

---------

Co-authored-by: Marshall Bowers <elliott.codes@gmail.com>
Co-authored-by: Max <max@zed.dev>

Change summary

Cargo.lock                                        |  1 +
crates/assistant/src/context.rs                   |  7 +
crates/assistant/src/inline_assistant.rs          | 90 +++++++++++-----
crates/assistant/src/terminal_inline_assistant.rs |  3 +
crates/language_model/src/language_model.rs       |  7 +
crates/telemetry_events/Cargo.toml                |  1 +
crates/telemetry_events/src/telemetry_events.rs   |  3 +
7 files changed, 83 insertions(+), 29 deletions(-)

Detailed changes

Cargo.lock 🔗

@@ -11497,6 +11497,7 @@ dependencies = [
 name = "telemetry_events"
 version = "0.1.0"
 dependencies = [
+ "language",
  "semantic_version",
  "serde",
 ]

crates/assistant/src/context.rs 🔗

@@ -2133,13 +2133,20 @@ impl Context {
                     });
 
                     if let Some(telemetry) = this.telemetry.as_ref() {
+                        let language_name = this
+                            .buffer
+                            .read(cx)
+                            .language()
+                            .map(|language| language.name());
                         telemetry.report_assistant_event(AssistantEvent {
                             conversation_id: Some(this.id.0.clone()),
                             kind: AssistantKind::Panel,
                             phase: AssistantPhase::Response,
                             model: model.telemetry_id(),
+                            model_provider: model.provider_id().to_string(),
                             response_latency,
                             error_message,
+                            language_name,
                         });
                     }
 

crates/assistant/src/inline_assistant.rs 🔗

@@ -210,18 +210,6 @@ impl InlineAssistant {
         initial_prompt: Option<String>,
         cx: &mut WindowContext,
     ) {
-        if let Some(telemetry) = self.telemetry.as_ref() {
-            if let Some(model) = LanguageModelRegistry::read_global(cx).active_model() {
-                telemetry.report_assistant_event(AssistantEvent {
-                    conversation_id: None,
-                    kind: AssistantKind::Inline,
-                    phase: AssistantPhase::Invoked,
-                    model: model.telemetry_id(),
-                    response_latency: None,
-                    error_message: None,
-                });
-            }
-        }
         let snapshot = editor.read(cx).buffer().read(cx).snapshot(cx);
 
         let mut selections = Vec::<Selection<Point>>::new();
@@ -268,6 +256,21 @@ impl InlineAssistant {
                 text_anchor: buffer.anchor_after(buffer_range.end),
             };
             codegen_ranges.push(start..end);
+
+            if let Some(telemetry) = self.telemetry.as_ref() {
+                if let Some(model) = LanguageModelRegistry::read_global(cx).active_model() {
+                    telemetry.report_assistant_event(AssistantEvent {
+                        conversation_id: None,
+                        kind: AssistantKind::Inline,
+                        phase: AssistantPhase::Invoked,
+                        model: model.telemetry_id(),
+                        model_provider: model.provider_id().to_string(),
+                        response_latency: None,
+                        error_message: None,
+                        language_name: buffer.language().map(|language| language.name()),
+                    });
+                }
+            }
         }
 
         let assist_group_id = self.next_assist_group_id.post_inc();
@@ -762,23 +765,34 @@ impl InlineAssistant {
     }
 
     pub fn finish_assist(&mut self, assist_id: InlineAssistId, undo: bool, cx: &mut WindowContext) {
-        if let Some(telemetry) = self.telemetry.as_ref() {
-            if let Some(model) = LanguageModelRegistry::read_global(cx).active_model() {
-                telemetry.report_assistant_event(AssistantEvent {
-                    conversation_id: None,
-                    kind: AssistantKind::Inline,
-                    phase: if undo {
-                        AssistantPhase::Rejected
-                    } else {
-                        AssistantPhase::Accepted
-                    },
-                    model: model.telemetry_id(),
-                    response_latency: None,
-                    error_message: None,
-                });
-            }
-        }
         if let Some(assist) = self.assists.get(&assist_id) {
+            if let Some(telemetry) = self.telemetry.as_ref() {
+                if let Some(model) = LanguageModelRegistry::read_global(cx).active_model() {
+                    let language_name = assist.editor.upgrade().and_then(|editor| {
+                        let multibuffer = editor.read(cx).buffer().read(cx);
+                        let ranges = multibuffer.range_to_buffer_ranges(assist.range.clone(), cx);
+                        ranges
+                            .first()
+                            .and_then(|(buffer, _, _)| buffer.read(cx).language())
+                            .map(|language| language.name())
+                    });
+                    telemetry.report_assistant_event(AssistantEvent {
+                        conversation_id: None,
+                        kind: AssistantKind::Inline,
+                        phase: if undo {
+                            AssistantPhase::Rejected
+                        } else {
+                            AssistantPhase::Accepted
+                        },
+                        model: model.telemetry_id(),
+                        model_provider: model.provider_id().to_string(),
+                        response_latency: None,
+                        error_message: None,
+                        language_name,
+                    });
+                }
+            }
+
             let assist_group_id = assist.group_id;
             if self.assist_groups[&assist_group_id].linked {
                 for assist_id in self.unlink_assist_group(assist_group_id, cx) {
@@ -2707,6 +2721,7 @@ impl CodegenAlternative {
         self.edit_position = Some(self.range.start.bias_right(&self.snapshot));
 
         let telemetry_id = model.telemetry_id();
+        let provider_id = model.provider_id();
         let chunks: LocalBoxFuture<Result<BoxStream<Result<String>>>> =
             if user_prompt.trim().to_lowercase() == "delete" {
                 async { Ok(stream::empty().boxed()) }.boxed_local()
@@ -2717,7 +2732,7 @@ impl CodegenAlternative {
                     .spawn(|_, cx| async move { model.stream_completion_text(request, &cx).await });
                 async move { Ok(chunks.await?.boxed()) }.boxed_local()
             };
-        self.handle_stream(telemetry_id, chunks, cx);
+        self.handle_stream(telemetry_id, provider_id.to_string(), chunks, cx);
         Ok(())
     }
 
@@ -2781,6 +2796,7 @@ impl CodegenAlternative {
     pub fn handle_stream(
         &mut self,
         model_telemetry_id: String,
+        model_provider_id: String,
         stream: impl 'static + Future<Output = Result<BoxStream<'static, Result<String>>>>,
         cx: &mut ModelContext<Self>,
     ) {
@@ -2811,6 +2827,15 @@ impl CodegenAlternative {
         }
 
         let telemetry = self.telemetry.clone();
+        let language_name = {
+            let multibuffer = self.buffer.read(cx);
+            let ranges = multibuffer.range_to_buffer_ranges(self.range.clone(), cx);
+            ranges
+                .first()
+                .and_then(|(buffer, _, _)| buffer.read(cx).language())
+                .map(|language| language.name())
+        };
+
         self.diff = Diff::default();
         self.status = CodegenStatus::Pending;
         let mut edit_start = self.range.start.to_offset(&snapshot);
@@ -2926,8 +2951,10 @@ impl CodegenAlternative {
                                     kind: AssistantKind::Inline,
                                     phase: AssistantPhase::Response,
                                     model: model_telemetry_id,
+                                    model_provider: model_provider_id.to_string(),
                                     response_latency,
                                     error_message,
+                                    language_name,
                                 });
                             }
 
@@ -3540,6 +3567,7 @@ mod tests {
         let (chunks_tx, chunks_rx) = mpsc::unbounded();
         codegen.update(cx, |codegen, cx| {
             codegen.handle_stream(
+                String::new(),
                 String::new(),
                 future::ready(Ok(chunks_rx.map(Ok).boxed())),
                 cx,
@@ -3611,6 +3639,7 @@ mod tests {
         let (chunks_tx, chunks_rx) = mpsc::unbounded();
         codegen.update(cx, |codegen, cx| {
             codegen.handle_stream(
+                String::new(),
                 String::new(),
                 future::ready(Ok(chunks_rx.map(Ok).boxed())),
                 cx,
@@ -3685,6 +3714,7 @@ mod tests {
         let (chunks_tx, chunks_rx) = mpsc::unbounded();
         codegen.update(cx, |codegen, cx| {
             codegen.handle_stream(
+                String::new(),
                 String::new(),
                 future::ready(Ok(chunks_rx.map(Ok).boxed())),
                 cx,
@@ -3758,6 +3788,7 @@ mod tests {
         let (chunks_tx, chunks_rx) = mpsc::unbounded();
         codegen.update(cx, |codegen, cx| {
             codegen.handle_stream(
+                String::new(),
                 String::new(),
                 future::ready(Ok(chunks_rx.map(Ok).boxed())),
                 cx,
@@ -3821,6 +3852,7 @@ mod tests {
         let (chunks_tx, chunks_rx) = mpsc::unbounded();
         codegen.update(cx, |codegen, cx| {
             codegen.handle_stream(
+                String::new(),
                 String::new(),
                 future::ready(Ok(chunks_rx.map(Ok).boxed())),
                 cx,

crates/assistant/src/terminal_inline_assistant.rs 🔗

@@ -1040,6 +1040,7 @@ impl Codegen {
         self.transaction = Some(TerminalTransaction::start(self.terminal.clone()));
         self.generation = cx.spawn(|this, mut cx| async move {
             let model_telemetry_id = model.telemetry_id();
+            let model_provider_id = model.provider_id();
             let response = model.stream_completion_text(prompt, &cx).await;
             let generate = async {
                 let (mut hunks_tx, mut hunks_rx) = mpsc::channel(1);
@@ -1069,8 +1070,10 @@ impl Codegen {
                             kind: AssistantKind::Inline,
                             phase: AssistantPhase::Response,
                             model: model_telemetry_id,
+                            model_provider: model_provider_id.to_string(),
                             response_latency,
                             error_message,
+                            language_name: None,
                         });
                     }
 

crates/language_model/src/language_model.rs 🔗

@@ -22,6 +22,7 @@ pub use request::*;
 pub use role::*;
 use schemars::JsonSchema;
 use serde::{de::DeserializeOwned, Deserialize, Serialize};
+use std::fmt;
 use std::{future::Future, sync::Arc};
 use ui::IconName;
 
@@ -231,6 +232,12 @@ pub struct LanguageModelProviderId(pub SharedString);
 #[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
 pub struct LanguageModelProviderName(pub SharedString);
 
+impl fmt::Display for LanguageModelProviderId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
 impl From<String> for LanguageModelId {
     fn from(value: String) -> Self {
         Self(SharedString::from(value))

crates/telemetry_events/Cargo.toml 🔗

@@ -12,5 +12,6 @@ workspace = true
 path = "src/telemetry_events.rs"
 
 [dependencies]
+language.workspace = true
 semantic_version.workspace = true
 serde.workspace = true

crates/telemetry_events/src/telemetry_events.rs 🔗

@@ -1,5 +1,6 @@
 //! See [Telemetry in Zed](https://zed.dev/docs/telemetry) for additional information.
 
+use language::LanguageName;
 use semantic_version::SemanticVersion;
 use serde::{Deserialize, Serialize};
 use std::{fmt::Display, sync::Arc, time::Duration};
@@ -153,8 +154,10 @@ pub struct AssistantEvent {
     pub phase: AssistantPhase,
     /// Name of the AI model used (gpt-4o, claude-3-5-sonnet, etc)
     pub model: String,
+    pub model_provider: String,
     pub response_latency: Option<Duration>,
     pub error_message: Option<String>,
+    pub language_name: Option<LanguageName>,
 }
 
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]