mod event_coalescer;

use crate::TelemetrySettings;
use anyhow::{Context as _, Result};
use clock::SystemClock;
use fs::Fs;
use futures::channel::mpsc;
use futures::{Future, StreamExt};
use gpui::{App, AppContext as _, BackgroundExecutor, Task};
use http_client::{self, AsyncBody, HttpClient, HttpClientWithUrl, Method, Request};
use parking_lot::Mutex;
use regex::Regex;
use release_channel::ReleaseChannel;
use settings::{Settings, SettingsStore};
use sha2::{Digest, Sha256};
use std::collections::HashSet;
use std::fs::File;
use std::io::Write;
use std::sync::LazyLock;
use std::time::Instant;
use std::{env, mem, path::PathBuf, sync::Arc, time::Duration};
use telemetry_events::{AssistantEventData, AssistantPhase, Event, EventRequestBody, EventWrapper};
use util::ResultExt as _;
use worktree::{UpdatedEntriesSet, WorktreeId};

use self::event_coalescer::EventCoalescer;

pub struct TelemetrySubscription {
    pub historical_events: Result<HistoricalEvents>,
    pub queued_events: Vec<EventWrapper>,
    pub live_events: mpsc::UnboundedReceiver<EventWrapper>,
}

pub struct HistoricalEvents {
    pub events: Vec<EventWrapper>,
    pub parse_error_count: usize,
}

pub struct Telemetry {
    clock: Arc<dyn SystemClock>,
    http_client: Arc<HttpClientWithUrl>,
    executor: BackgroundExecutor,
    state: Arc<Mutex<TelemetryState>>,
}

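/// Mutable telemetry state shared between the foreground app and the background
/// flush task, guarded by the mutex in `Telemetry::state`.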
struct TelemetryState {
    settings: TelemetrySettings,
    system_id: Option<Arc<str>>,       // Per system
    installation_id: Option<Arc<str>>, // Per app installation (different for dev, nightly, preview, and stable)
    session_id: Option<String>,        // Per app launch
    metrics_id: Option<Arc<str>>,      // Per logged-in user
    release_channel: Option<&'static str>,
    architecture: &'static str,
    events_queue: Vec<EventWrapper>,
    flush_events_task: Option<Task<()>>,

    log_file: Option<File>,
    is_staff: Option<bool>,
    first_event_date_time: Option<Instant>,
    event_coalescer: EventCoalescer,
    max_queue_size: usize,
    worktrees_with_project_type_events_sent: HashSet<WorktreeId>,

    os_name: String,
    app_version: String,
    os_version: Option<String>,

    subscribers: Vec<mpsc::UnboundedSender<EventWrapper>>,
}

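// Events are flushed either when the queue reaches MAX_QUEUE_LEN or once
// FLUSH_INTERVAL has elapsed after the first queued event, whichever comes
// first. Debug builds use much smaller limits so flushes are easy to observe
// during development.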
#[cfg(debug_assertions)]
const MAX_QUEUE_LEN: usize = 5;

#[cfg(not(debug_assertions))]
const MAX_QUEUE_LEN: usize = 50;

#[cfg(debug_assertions)]
const FLUSH_INTERVAL: Duration = Duration::from_secs(1);

#[cfg(not(debug_assertions))]
const FLUSH_INTERVAL: Duration = Duration::from_secs(60 * 5);

static ZED_CLIENT_CHECKSUM_SEED: LazyLock<Option<Vec<u8>>> = LazyLock::new(|| {
    option_env!("ZED_CLIENT_CHECKSUM_SEED")
        .map(|s| s.as_bytes().into())
        .or_else(|| {
            env::var("ZED_CLIENT_CHECKSUM_SEED")
                .ok()
                .map(|s| s.as_bytes().into())
        })
});

pub static MINIDUMP_ENDPOINT: LazyLock<Option<String>> = LazyLock::new(|| {
    option_env!("ZED_MINIDUMP_ENDPOINT")
        .map(str::to_string)
        .or_else(|| env::var("ZED_MINIDUMP_ENDPOINT").ok())
});

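/// Matches file names that identify a .NET project: `global.json`,
/// `Directory.Build.props`, or anything ending in `.csproj`, `.fsproj`,
/// `.vbproj`, or `.sln`.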
static DOTNET_PROJECT_FILES_REGEX: LazyLock<Regex> = LazyLock::new(|| {
    Regex::new(r"^(global\.json|Directory\.Build\.props|.*\.(csproj|fsproj|vbproj|sln))$").unwrap()
});

#[cfg(target_os = "macos")]
static MACOS_VERSION_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"(\s*\(Build [^)]*[0-9]\))").unwrap());

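/// Returns a human-readable OS name. On Linux and FreeBSD this includes the
/// compositor guessed by gpui (for example Wayland or X11).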
pub fn os_name() -> String {
    #[cfg(target_os = "macos")]
    {
        "macOS".to_string()
    }
    #[cfg(target_os = "linux")]
    {
        format!("Linux {}", gpui::guess_compositor())
    }
    #[cfg(target_os = "freebsd")]
    {
        format!("FreeBSD {}", gpui::guess_compositor())
    }

    #[cfg(target_os = "windows")]
    {
        "Windows".to_string()
    }
}

/// Note: This might do blocking IO! Only call from background threads
pub fn os_version() -> String {
    #[cfg(target_os = "macos")]
    {
        use objc2_foundation::NSProcessInfo;
        let process_info = NSProcessInfo::processInfo();
        let version_nsstring = unsafe { process_info.operatingSystemVersionString() };
        // "Version 15.6.1 (Build 24G90)" -> "15.6.1 (Build 24G90)"
        let version_string = version_nsstring.to_string().replace("Version ", "");
        // "15.6.1 (Build 24G90)" -> "15.6.1"
        // "26.0.0 (Build 25A5349a)" -> unchanged (Beta or Rapid Security Response; ends with letter)
        MACOS_VERSION_REGEX
            .replace_all(&version_string, "")
            .to_string()
    }
    #[cfg(any(target_os = "linux", target_os = "freebsd"))]
    {
        use std::path::Path;

        let content = if let Ok(file) = std::fs::read_to_string(Path::new("/etc/os-release")) {
            file
        } else if let Ok(file) = std::fs::read_to_string(Path::new("/usr/lib/os-release")) {
            file
        } else if let Ok(file) = std::fs::read_to_string(Path::new("/var/run/os-release")) {
            file
        } else {
            log::error!(
                "Failed to load /etc/os-release, /usr/lib/os-release, or /var/run/os-release"
            );
            "".to_string()
        };
        let mut name = "unknown";
        let mut version = "unknown";

        for line in content.lines() {
            match line.split_once('=') {
                Some(("ID", val)) => name = val.trim_matches('"'),
                Some(("VERSION_ID", val)) => version = val.trim_matches('"'),
                _ => {}
            }
        }

        format!("{} {}", name, version)
    }

    #[cfg(target_os = "windows")]
    {
        let mut info = unsafe { std::mem::zeroed() };
        let status = unsafe { windows::Wdk::System::SystemServices::RtlGetVersion(&mut info) };
        if status.is_ok() {
            semver::Version::new(
                info.dwMajorVersion as _,
                info.dwMinorVersion as _,
                info.dwBuildNumber as _,
            )
            .to_string()
        } else {
            "unknown".to_string()
        }
    }
}

impl Telemetry {
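    /// Creates the shared `Telemetry` handle: captures the current settings and
    /// release channel, spawns a background task that records the OS version and
    /// opens the local telemetry log, forwards events emitted through the
    /// `telemetry` crate into the event queue, and registers an app-quit hook.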
    pub fn new(
        clock: Arc<dyn SystemClock>,
        client: Arc<HttpClientWithUrl>,
        cx: &mut App,
    ) -> Arc<Self> {
        let release_channel =
            ReleaseChannel::try_global(cx).map(|release_channel| release_channel.display_name());

        let state = Arc::new(Mutex::new(TelemetryState {
            settings: *TelemetrySettings::get_global(cx),
            architecture: env::consts::ARCH,
            release_channel,
            system_id: None,
            installation_id: None,
            session_id: None,
            metrics_id: None,
            events_queue: Vec::new(),
            flush_events_task: None,
            log_file: None,
            is_staff: None,
            first_event_date_time: None,
            event_coalescer: EventCoalescer::new(clock.clone()),
            max_queue_size: MAX_QUEUE_LEN,
            worktrees_with_project_type_events_sent: HashSet::new(),

            os_version: None,
            os_name: os_name(),
            app_version: release_channel::AppVersion::global(cx).to_string(),
            subscribers: Vec::new(),
        }));

        cx.background_spawn({
            let state = state.clone();
            async move {
                // `os_version` may perform blocking IO, so gather it on the
                // background executor rather than eagerly on the main thread.
                let os_version = os_version();
                state.lock().os_version = Some(os_version);
                if let Some(tempfile) = File::create(Self::log_file_path()).log_err() {
                    state.lock().log_file = Some(tempfile);
                }
            }
        })
        .detach();

        cx.observe_global::<SettingsStore>({
            let state = state.clone();

            move |cx| {
                let mut state = state.lock();
                state.settings = *TelemetrySettings::get_global(cx);
            }
        })
        .detach();

        let this = Arc::new(Self {
            clock,
            http_client: client,
            executor: cx.background_executor().clone(),
            state,
        });

        let (tx, mut rx) = mpsc::unbounded();
        ::telemetry::init(tx);

        cx.background_spawn({
            let this = Arc::downgrade(&this);
            async move {
                if cfg!(feature = "test-support") {
                    return;
                }
                while let Some(event) = rx.next().await {
                    let Some(telemetry) = this.upgrade() else { break };
                    telemetry.report_event(Event::Flexible(event))
                }
            }
        })
        .detach();

        // We should only ever have one instance of Telemetry, so leak the subscription to keep
        // it alive rather than storing it in TelemetryState, which would complicate the spawns
        // above because subscriptions are not Send.
        std::mem::forget(cx.on_app_quit({
            let this = this.clone();
            move |_| this.shutdown_telemetry()
        }));

        this
    }

    #[cfg(any(test, feature = "test-support"))]
    fn shutdown_telemetry(self: &Arc<Self>) -> impl Future<Output = ()> + use<> {
        Task::ready(())
    }

    // Skip calling this function in tests.
    // TestAppContext ends up calling this function on shutdown and it panics when trying to
    // find the TelemetrySettings.
    #[cfg(not(any(test, feature = "test-support")))]
    fn shutdown_telemetry(self: &Arc<Self>) -> impl Future<Output = ()> + use<> {
        telemetry::event!("App Closed");
        // TODO: close final edit period and make sure it's sent
        Task::ready(())
    }

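    /// Path of the local telemetry log: a newline-delimited JSON file containing
    /// one serialized `EventWrapper` per line.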
    pub fn log_file_path() -> PathBuf {
        paths::logs_dir().join("telemetry.log")
    }

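    /// Registers a subscriber and returns three views of the event stream: events
    /// parsed from the on-disk log, events currently queued in memory, and a
    /// channel that will receive all future events.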
    pub async fn subscribe_with_history(
        self: &Arc<Self>,
        fs: Arc<dyn Fs>,
    ) -> TelemetrySubscription {
        let historical_events = self.read_log_file(fs).await;

        let mut state = self.state.lock();
        let queued_events: Vec<EventWrapper> = state.events_queue.clone();

        let (tx, rx) = mpsc::unbounded();
        state.subscribers.push(tx);

        drop(state);

        TelemetrySubscription {
            historical_events,
            queued_events,
            live_events: rx,
        }
    }

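    /// Reads at most the last `MAX_LOG_READ` bytes of the telemetry log, starting
    /// at a line boundary, and parses each line as an `EventWrapper`. Lines that
    /// fail to parse are counted rather than returned.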
    async fn read_log_file(self: &Arc<Self>, fs: Arc<dyn Fs>) -> anyhow::Result<HistoricalEvents> {
        const MAX_LOG_READ: usize = 5 * 1024 * 1024;

        let path = Self::log_file_path();

        let content = fs
            .load_bytes(&path)
            .await
            .with_context(|| format!("failed to load telemetry log from {:?}", path))?;

        let start_offset = if content.len() > MAX_LOG_READ {
            let skip = content.len() - MAX_LOG_READ;
            content[skip..]
                .iter()
                .position(|&b| b == b'\n')
                .map(|pos| skip + pos + 1)
                .unwrap_or(skip)
        } else {
            0
        };

        let content_str = std::str::from_utf8(&content[start_offset..])
            .context("telemetry log file contains invalid UTF-8")?;

        let mut events = Vec::new();
        let mut parse_error_count = 0;

        for line in content_str.lines() {
            if line.trim().is_empty() {
                continue;
            }
            match serde_json::from_str::<EventWrapper>(line) {
                Ok(event) => events.push(event),
                Err(_) => parse_error_count += 1,
            }
        }

        Ok(HistoricalEvents {
            events,
            parse_error_count,
        })
    }

    pub fn has_checksum_seed(&self) -> bool {
        ZED_CLIENT_CHECKSUM_SEED.is_some()
    }

    pub fn start(
        self: &Arc<Self>,
        system_id: Option<String>,
        installation_id: Option<String>,
        session_id: String,
        cx: &App,
    ) {
        let mut state = self.state.lock();
        state.system_id = system_id.map(|id| id.into());
        state.installation_id = installation_id.map(|id| id.into());
        state.session_id = Some(session_id);
        state.app_version = release_channel::AppVersion::global(cx).to_string();
        state.os_name = os_name();
    }

    pub fn metrics_enabled(self: &Arc<Self>) -> bool {
        self.state.lock().settings.metrics
    }

    pub fn diagnostics_enabled(self: &Arc<Self>) -> bool {
        self.state.lock().settings.diagnostics
    }

    pub fn set_authenticated_user_info(
        self: &Arc<Self>,
        metrics_id: Option<String>,
        is_staff: bool,
    ) {
        let mut state = self.state.lock();

        if !state.settings.metrics {
            return;
        }

        let metrics_id: Option<Arc<str>> = metrics_id.map(|id| id.into());
        state.metrics_id.clone_from(&metrics_id);
        state.is_staff = Some(is_staff);
        drop(state);
    }

    pub fn report_assistant_event(self: &Arc<Self>, event: AssistantEventData) {
        let event_type = match event.phase {
            AssistantPhase::Response => "Assistant Responded",
            AssistantPhase::Invoked => "Assistant Invoked",
            AssistantPhase::Accepted => "Assistant Response Accepted",
            AssistantPhase::Rejected => "Assistant Response Rejected",
        };

        telemetry::event!(
            event_type,
            conversation_id = event.conversation_id,
            kind = event.kind,
            phase = event.phase,
            message_id = event.message_id,
            model = event.model,
            model_provider = event.model_provider,
            response_latency = event.response_latency,
            error_message = event.error_message,
            language_name = event.language_name,
        );
    }

    pub fn log_edit_event(self: &Arc<Self>, environment: &'static str, is_via_ssh: bool) {
        static LAST_EVENT_TIME: Mutex<Option<Instant>> = Mutex::new(None);

        let mut state = self.state.lock();
        let period_data = state.event_coalescer.log_event(environment);
        drop(state);

        if let Some(mut last_event) = LAST_EVENT_TIME.try_lock() {
            let current_time = std::time::Instant::now();

            // Throttle "Editor Edited" events to at most one every ten minutes.
            // The first completed period is reported immediately rather than
            // being dropped.
            if let Some(last_time) = *last_event {
                if current_time.duration_since(last_time) <= Duration::from_secs(60 * 10) {
                    return;
                }
            }
            *last_event = Some(current_time);

            if let Some((start, end, environment)) = period_data {
                let duration = end
                    .saturating_duration_since(start)
                    .min(Duration::from_secs(60 * 60 * 24))
                    .as_millis() as i64;

                telemetry::event!(
                    "Editor Edited",
                    duration = duration,
                    environment = environment,
                    is_via_ssh = is_via_ssh
                );
            }
        }
    }

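    /// Emits a "Project Opened" event for each project type detected in the given
    /// worktree. Each worktree reports its project types at most once.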
    pub fn report_discovered_project_type_events(
        self: &Arc<Self>,
        worktree_id: WorktreeId,
        updated_entries_set: &UpdatedEntriesSet,
    ) {
        let Some(project_types) = self.detect_project_types(worktree_id, updated_entries_set)
        else {
            return;
        };

        for project_type in project_types {
            telemetry::event!("Project Opened", project_type = project_type);
        }
    }

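    /// Scans the updated entries for well-known project files (pnpm, yarn, node,
    /// .NET) and returns the detected types, sorted and deduplicated. Returns
    /// `None` if this worktree has already reported its project types.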
    fn detect_project_types(
        self: &Arc<Self>,
        worktree_id: WorktreeId,
        updated_entries_set: &UpdatedEntriesSet,
    ) -> Option<Vec<String>> {
        let mut state = self.state.lock();

        if state
            .worktrees_with_project_type_events_sent
            .contains(&worktree_id)
        {
            return None;
        }

        let mut project_types: HashSet<&str> = HashSet::new();

        for (path, _, _) in updated_entries_set.iter() {
            let Some(file_name) = path.file_name() else {
                continue;
            };

            let project_type = if file_name == "pnpm-lock.yaml" {
                Some("pnpm")
            } else if file_name == "yarn.lock" {
                Some("yarn")
            } else if file_name == "package.json" {
                Some("node")
            } else if DOTNET_PROJECT_FILES_REGEX.is_match(file_name) {
                Some("dotnet")
            } else {
                None
            };

            if let Some(project_type) = project_type {
                project_types.insert(project_type);
            };
        }

        if !project_types.is_empty() {
            state
                .worktrees_with_project_type_events_sent
                .insert(worktree_id);
        }

        let mut project_types: Vec<_> = project_types.into_iter().map(String::from).collect();
        project_types.sort();
        Some(project_types)
    }

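    /// Tags the event with `event_source = "zed"`, fans it out to subscribers,
    /// queues it, and schedules a flush. Does nothing when metrics are disabled.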
    fn report_event(self: &Arc<Self>, mut event: Event) {
        let mut state = self.state.lock();
        // RUST_LOG=telemetry=trace to debug telemetry events
        log::trace!(target: "telemetry", "{:?}", event);

        if !state.settings.metrics {
            return;
        }

        match &mut event {
            Event::Flexible(event) => event
                .event_properties
                .insert("event_source".into(), "zed".into()),
        };

        if state.flush_events_task.is_none() {
            let this = self.clone();
            state.flush_events_task = Some(self.executor.spawn(async move {
                this.executor.timer(FLUSH_INTERVAL).await;
                this.flush_events().detach();
            }));
        }

        let date_time = self.clock.utc_now();

        let milliseconds_since_first_event = match state.first_event_date_time {
            Some(first_event_date_time) => date_time
                .saturating_duration_since(first_event_date_time)
                .min(Duration::from_secs(60 * 60 * 24))
                .as_millis() as i64,
            None => {
                state.first_event_date_time = Some(date_time);
                0
            }
        };

        let signed_in = state.metrics_id.is_some();
        let event_wrapper = EventWrapper {
            signed_in,
            milliseconds_since_first_event,
            event,
        };

        state
            .subscribers
            .retain(|tx| tx.unbounded_send(event_wrapper.clone()).is_ok());

        state.events_queue.push(event_wrapper);

        if state.installation_id.is_some() && state.events_queue.len() >= state.max_queue_size {
            drop(state);
            self.flush_events().detach();
        }
    }

    pub fn metrics_id(self: &Arc<Self>) -> Option<Arc<str>> {
        self.state.lock().metrics_id.clone()
    }

    pub fn system_id(self: &Arc<Self>) -> Option<Arc<str>> {
        self.state.lock().system_id.clone()
    }

    pub fn installation_id(self: &Arc<Self>) -> Option<Arc<str>> {
        self.state.lock().installation_id.clone()
    }

    pub fn is_staff(self: &Arc<Self>) -> Option<bool> {
        self.state.lock().is_staff
    }

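    /// Serializes the request body into the reused buffer, computes the
    /// `x-zed-checksum` header (empty when no checksum seed is configured), and
    /// builds the POST request for the `/telemetry/events` endpoint.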
    fn build_request(
        self: &Arc<Self>,
        // We take in the JSON bytes buffer so we can reuse the existing allocation.
        mut json_bytes: Vec<u8>,
        event_request: &EventRequestBody,
    ) -> Result<Request<AsyncBody>> {
        json_bytes.clear();
        serde_json::to_writer(&mut json_bytes, event_request)?;

        let checksum = calculate_json_checksum(&json_bytes).unwrap_or_default();

        Ok(Request::builder()
            .method(Method::POST)
            .uri(
                self.http_client
                    .build_zed_api_url("/telemetry/events", &[])?
                    .as_ref(),
            )
            .header("Content-Type", "application/json")
            .header("x-zed-checksum", checksum)
            .body(json_bytes.into())?)
    }

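    /// Drains the in-memory queue, appends each drained event to the local
    /// telemetry log as a JSON line, and uploads the whole batch in one request.
    /// A non-200 response is logged but not retried.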
    pub async fn flush_events_inner(self: &Arc<Self>) -> Result<()> {
        let (json_bytes, request_body) = {
            let mut state = self.state.lock();
            state.first_event_date_time = None;
            let events = mem::take(&mut state.events_queue);
            state.flush_events_task.take();
            if events.is_empty() {
                return Ok(());
            }

            let mut json_bytes = Vec::new();

            if let Some(file) = &mut state.log_file {
                for event in &events {
                    json_bytes.clear();
                    serde_json::to_writer(&mut json_bytes, event)?;
                    file.write_all(&json_bytes)?;
                    file.write_all(b"\n")?;
                }
            }

            (
                json_bytes,
                EventRequestBody {
                    system_id: state.system_id.as_deref().map(Into::into),
                    installation_id: state.installation_id.as_deref().map(Into::into),
                    session_id: state.session_id.clone(),
                    metrics_id: state.metrics_id.as_deref().map(Into::into),
                    is_staff: state.is_staff,
                    app_version: state.app_version.clone(),
                    os_name: state.os_name.clone(),
                    os_version: state.os_version.clone(),
                    architecture: state.architecture.to_string(),

                    release_channel: state.release_channel.map(Into::into),
                    events,
                },
            )
        };

        let request = self.build_request(json_bytes, &request_body)?;
        let response = self.http_client.send(request).await?;
        if response.status() != 200 {
            log::error!("Failed to send events: HTTP {:?}", response.status());
        }

        anyhow::Ok(())
    }

    pub fn flush_events(self: &Arc<Self>) -> Task<()> {
        let this = self.clone();
        self.executor.spawn(async move {
            this.flush_events_inner().await.log_err();
        })
    }
}

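/// Computes the value of the `x-zed-checksum` header: the hex-encoded SHA-256
/// digest of `seed || json || seed`, where the seed comes from
/// `ZED_CLIENT_CHECKSUM_SEED`. Returns `None` when no seed is configured.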
pub fn calculate_json_checksum(json: &impl AsRef<[u8]>) -> Option<String> {
    let Some(checksum_seed) = &*ZED_CLIENT_CHECKSUM_SEED else {
        return None;
    };

    let mut summer = Sha256::new();
    summer.update(checksum_seed);
    summer.update(json);
    summer.update(checksum_seed);
    let mut checksum = String::new();
    for byte in summer.finalize().as_slice() {
        use std::fmt::Write;
        write!(&mut checksum, "{:02x}", byte).unwrap();
    }

    Some(checksum)
}

#[cfg(test)]
mod tests {
    use super::*;
    use clock::FakeSystemClock;

    use gpui::TestAppContext;
    use http_client::FakeHttpClient;
    use std::collections::HashMap;
    use telemetry_events::FlexibleEvent;
    use util::rel_path::RelPath;
    use worktree::{PathChange, ProjectEntryId, WorktreeId};

    #[gpui::test]
    async fn test_telemetry_flush_on_max_queue_size(
        executor: BackgroundExecutor,
        cx: &mut TestAppContext,
    ) {
        init_test(cx);
        let clock = Arc::new(FakeSystemClock::new());
        let http = FakeHttpClient::with_200_response();
        let system_id = Some("system_id".to_string());
        let installation_id = Some("installation_id".to_string());
        let session_id = "session_id".to_string();

        let (telemetry, first_date_time, event) = cx.update(|cx| {
            let telemetry = Telemetry::new(clock.clone(), http, cx);

            telemetry.state.lock().max_queue_size = 4;
            telemetry.start(system_id, installation_id, session_id, cx);

            assert!(is_empty_state(&telemetry));

            let first_date_time = clock.utc_now();
            let event_properties = HashMap::from_iter([(
                "test_key".to_string(),
                serde_json::Value::String("test_value".to_string()),
            )]);

            let event = FlexibleEvent {
                event_type: "test".to_string(),
                event_properties,
            };

            (telemetry, first_date_time, event)
        });

        cx.update(|_cx| {
            telemetry.report_event(Event::Flexible(event.clone()));
            assert_eq!(telemetry.state.lock().events_queue.len(), 1);
            assert!(telemetry.state.lock().flush_events_task.is_some());
            assert_eq!(
                telemetry.state.lock().first_event_date_time,
                Some(first_date_time)
            );

            clock.advance(Duration::from_millis(100));

            telemetry.report_event(Event::Flexible(event.clone()));
            assert_eq!(telemetry.state.lock().events_queue.len(), 2);
            assert!(telemetry.state.lock().flush_events_task.is_some());
            assert_eq!(
                telemetry.state.lock().first_event_date_time,
                Some(first_date_time)
            );

            clock.advance(Duration::from_millis(100));

            telemetry.report_event(Event::Flexible(event.clone()));
            assert_eq!(telemetry.state.lock().events_queue.len(), 3);
            assert!(telemetry.state.lock().flush_events_task.is_some());
            assert_eq!(
                telemetry.state.lock().first_event_date_time,
                Some(first_date_time)
            );

            clock.advance(Duration::from_millis(100));

            // Adding a 4th event should cause a flush
            telemetry.report_event(Event::Flexible(event));
        });

        // Run the spawned flush task to completion
        executor.run_until_parked();

        cx.update(|_cx| {
            assert!(is_empty_state(&telemetry));
        });
    }

    #[gpui::test]
    async fn test_telemetry_flush_on_flush_interval(
        executor: BackgroundExecutor,
        cx: &mut TestAppContext,
    ) {
        init_test(cx);
        let clock = Arc::new(FakeSystemClock::new());
        let http = FakeHttpClient::with_200_response();
        let system_id = Some("system_id".to_string());
        let installation_id = Some("installation_id".to_string());
        let session_id = "session_id".to_string();

        cx.update(|cx| {
            let telemetry = Telemetry::new(clock.clone(), http, cx);
            telemetry.state.lock().max_queue_size = 4;
            telemetry.start(system_id, installation_id, session_id, cx);

            assert!(is_empty_state(&telemetry));
            let first_date_time = clock.utc_now();

            let event_properties = HashMap::from_iter([(
                "test_key".to_string(),
                serde_json::Value::String("test_value".to_string()),
            )]);

            let event = FlexibleEvent {
                event_type: "test".to_string(),
                event_properties,
            };

            telemetry.report_event(Event::Flexible(event));
            assert_eq!(telemetry.state.lock().events_queue.len(), 1);
            assert!(telemetry.state.lock().flush_events_task.is_some());
            assert_eq!(
                telemetry.state.lock().first_event_date_time,
                Some(first_date_time)
            );

            let duration = Duration::from_millis(1);

            // Test 1 millisecond before the flush interval limit is met
            executor.advance_clock(FLUSH_INTERVAL - duration);

            assert!(!is_empty_state(&telemetry));

            // Test the exact moment the flush interval limit is met
            executor.advance_clock(duration);

            assert!(is_empty_state(&telemetry));
        });
    }

    #[gpui::test]
    fn test_project_discovery_does_not_double_report(cx: &mut gpui::TestAppContext) {
        init_test(cx);

        let clock = Arc::new(FakeSystemClock::new());
        let http = FakeHttpClient::with_200_response();
        let telemetry = cx.update(|cx| Telemetry::new(clock.clone(), http, cx));
        let worktree_id = 1;

        // Scan of empty worktree finds nothing
        test_project_discovery_helper(telemetry.clone(), vec![], Some(vec![]), worktree_id);

        // Files added, second scan of worktree 1 finds project type
        test_project_discovery_helper(
            telemetry.clone(),
            vec!["package.json"],
            Some(vec!["node"]),
            worktree_id,
        );

        // Third scan of worktree does not double report, as we already reported
        test_project_discovery_helper(telemetry, vec!["package.json"], None, worktree_id);
    }

    #[gpui::test]
    fn test_pnpm_project_discovery(cx: &mut gpui::TestAppContext) {
        init_test(cx);

        let clock = Arc::new(FakeSystemClock::new());
        let http = FakeHttpClient::with_200_response();
        let telemetry = cx.update(|cx| Telemetry::new(clock.clone(), http, cx));

        test_project_discovery_helper(
            telemetry,
            vec!["package.json", "pnpm-lock.yaml"],
            Some(vec!["node", "pnpm"]),
            1,
        );
    }

    #[gpui::test]
    fn test_yarn_project_discovery(cx: &mut gpui::TestAppContext) {
        init_test(cx);

        let clock = Arc::new(FakeSystemClock::new());
        let http = FakeHttpClient::with_200_response();
        let telemetry = cx.update(|cx| Telemetry::new(clock.clone(), http, cx));

        test_project_discovery_helper(
            telemetry,
            vec!["package.json", "yarn.lock"],
            Some(vec!["node", "yarn"]),
            1,
        );
    }

    #[gpui::test]
    fn test_dotnet_project_discovery(cx: &mut gpui::TestAppContext) {
        init_test(cx);

        let clock = Arc::new(FakeSystemClock::new());
        let http = FakeHttpClient::with_200_response();
        let telemetry = cx.update(|cx| Telemetry::new(clock.clone(), http, cx));

        // Using different worktrees, since production code prevents reporting a
        // project type for the same worktree more than once

        test_project_discovery_helper(
            telemetry.clone(),
            vec!["global.json"],
            Some(vec!["dotnet"]),
            1,
        );
        test_project_discovery_helper(
            telemetry.clone(),
            vec!["Directory.Build.props"],
            Some(vec!["dotnet"]),
            2,
        );
        test_project_discovery_helper(
            telemetry.clone(),
            vec!["file.csproj"],
            Some(vec!["dotnet"]),
            3,
        );
        test_project_discovery_helper(
            telemetry.clone(),
            vec!["file.fsproj"],
            Some(vec!["dotnet"]),
            4,
        );
        test_project_discovery_helper(
            telemetry.clone(),
            vec!["file.vbproj"],
            Some(vec!["dotnet"]),
            5,
        );
        test_project_discovery_helper(telemetry.clone(), vec!["file.sln"], Some(vec!["dotnet"]), 6);

        // Each worktree should only send a single project type event, even when
        // encountering multiple files associated with that project type
        test_project_discovery_helper(
            telemetry,
            vec!["global.json", "Directory.Build.props"],
            Some(vec!["dotnet"]),
            7,
        );
    }
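
    // A minimal shape check for the checksum helper: only the digest's shape can
    // be asserted here, because the seed comes from the build or runtime
    // environment.
    #[test]
    fn test_calculate_json_checksum_shape() {
        let json = br#"{"events":[]}"#.to_vec();
        match calculate_json_checksum(&json) {
            // With a configured seed, the result is the 64-character lowercase
            // hex encoding of a SHA-256 digest.
            Some(checksum) => {
                assert_eq!(checksum.len(), 64);
                assert!(checksum.chars().all(|c| c.is_ascii_hexdigit()));
            }
            // Without a seed, no checksum can be produced.
            None => assert!(ZED_CLIENT_CHECKSUM_SEED.is_none()),
        }
    }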

    // TODO:
    // Test settings
    // Update FakeHTTPClient to keep track of the number of requests and assert on it

    fn init_test(cx: &mut TestAppContext) {
        cx.update(|cx| {
            let settings_store = SettingsStore::test(cx);
            cx.set_global(settings_store);
        });
    }

    fn is_empty_state(telemetry: &Telemetry) -> bool {
        telemetry.state.lock().events_queue.is_empty()
            && telemetry.state.lock().flush_events_task.is_none()
            && telemetry.state.lock().first_event_date_time.is_none()
    }

    fn test_project_discovery_helper(
        telemetry: Arc<Telemetry>,
        file_paths: Vec<&str>,
        expected_project_types: Option<Vec<&str>>,
        worktree_id_num: usize,
    ) {
        let worktree_id = WorktreeId::from_usize(worktree_id_num);
        let entries: Vec<_> = file_paths
            .into_iter()
            .enumerate()
            .filter_map(|(i, path)| {
                Some((
                    Arc::from(RelPath::unix(path).ok()?),
                    ProjectEntryId::from_proto(i as u64 + 1),
                    PathChange::Added,
                ))
            })
            .collect();
        let updated_entries: UpdatedEntriesSet = Arc::from(entries.as_slice());

        let detected_project_types = telemetry.detect_project_types(worktree_id, &updated_entries);

        let expected_project_types =
            expected_project_types.map(|types| types.iter().map(|&t| t.to_string()).collect());

        assert_eq!(detected_project_types, expected_project_types);
    }
}