Cargo.lock
@@ -4038,6 +4038,8 @@ dependencies = [
"minidumper",
"paths",
"release_channel",
+ "serde",
+ "serde_json",
"smol",
"workspace-hack",
]
Authored by Julia Ryan and Max Brunsfeld.
The minidump-based crash reporting is now entirely separate from our
legacy panic_hook-based reporting. This should improve the association
of minidumps with their metadata and give us more consistent crash
reports.
Release Notes:
- N/A
---------
Co-authored-by: Max Brunsfeld <maxbrunsfeld@gmail.com>
Cargo.lock | 2
crates/crashes/Cargo.toml | 2
crates/crashes/src/crashes.rs | 157 +++++++++++++++-----
crates/proto/proto/app.proto | 6
crates/remote/src/ssh_session.rs | 30 +--
crates/remote_server/src/unix.rs | 93 ++++++-----
crates/zed/src/main.rs | 11 +
crates/zed/src/reliability.rs | 264 +++++++++++++++------------------
8 files changed, 316 insertions(+), 249 deletions(-)
@@ -4038,6 +4038,8 @@ dependencies = [
"minidumper",
"paths",
"release_channel",
+ "serde",
+ "serde_json",
"smol",
"workspace-hack",
]
@@ -12,6 +12,8 @@ minidumper.workspace = true
paths.workspace = true
release_channel.workspace = true
smol.workspace = true
+serde.workspace = true
+serde_json.workspace = true
workspace-hack.workspace = true
[lints]
@@ -2,15 +2,17 @@ use crash_handler::CrashHandler;
use log::info;
use minidumper::{Client, LoopAction, MinidumpBinary};
use release_channel::{RELEASE_CHANNEL, ReleaseChannel};
+use serde::{Deserialize, Serialize};
use std::{
env,
- fs::File,
+ fs::{self, File},
io,
+ panic::Location,
path::{Path, PathBuf},
process::{self, Command},
sync::{
- LazyLock, OnceLock,
+ Arc, OnceLock,
atomic::{AtomicBool, Ordering},
},
thread,
@@ -18,19 +20,17 @@ use std::{
};
// set once the crash handler has initialized and the client has connected to it
-pub static CRASH_HANDLER: AtomicBool = AtomicBool::new(false);
+pub static CRASH_HANDLER: OnceLock<Arc<Client>> = OnceLock::new();
// set when the first minidump request is made to avoid generating duplicate crash reports
pub static REQUESTED_MINIDUMP: AtomicBool = AtomicBool::new(false);
-const CRASH_HANDLER_TIMEOUT: Duration = Duration::from_secs(60);
+const CRASH_HANDLER_PING_TIMEOUT: Duration = Duration::from_secs(60);
+const CRASH_HANDLER_CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
-pub static GENERATE_MINIDUMPS: LazyLock<bool> = LazyLock::new(|| {
- *RELEASE_CHANNEL != ReleaseChannel::Dev || env::var("ZED_GENERATE_MINIDUMPS").is_ok()
-});
-
-pub async fn init(id: String) {
- if !*GENERATE_MINIDUMPS {
+pub async fn init(crash_init: InitCrashHandler) {
+ if *RELEASE_CHANNEL == ReleaseChannel::Dev && env::var("ZED_GENERATE_MINIDUMPS").is_err() {
return;
}
+
let exe = env::current_exe().expect("unable to find ourselves");
let zed_pid = process::id();
// TODO: we should be able to get away with using 1 crash-handler process per machine,
@@ -61,9 +61,11 @@ pub async fn init(id: String) {
smol::Timer::after(retry_frequency).await;
}
let client = maybe_client.unwrap();
- client.send_message(1, id).unwrap(); // set session id on the server
+ client
+ .send_message(1, serde_json::to_vec(&crash_init).unwrap())
+ .unwrap();
- let client = std::sync::Arc::new(client);
+ let client = Arc::new(client);
let handler = crash_handler::CrashHandler::attach(unsafe {
let client = client.clone();
crash_handler::make_crash_event(move |crash_context: &crash_handler::CrashContext| {
@@ -72,7 +74,6 @@ pub async fn init(id: String) {
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
- client.send_message(2, "mistakes were made").unwrap();
client.ping().unwrap();
client.request_dump(crash_context).is_ok()
} else {
@@ -87,7 +88,7 @@ pub async fn init(id: String) {
{
handler.set_ptracer(Some(server_pid));
}
- CRASH_HANDLER.store(true, Ordering::Release);
+ CRASH_HANDLER.set(client.clone()).ok();
std::mem::forget(handler);
info!("crash handler registered");
@@ -98,14 +99,43 @@ pub async fn init(id: String) {
}
pub struct CrashServer {
- session_id: OnceLock<String>,
+ initialization_params: OnceLock<InitCrashHandler>,
+ panic_info: OnceLock<CrashPanic>,
+ has_connection: Arc<AtomicBool>,
+}
+
+#[derive(Debug, Deserialize, Serialize, Clone)]
+pub struct CrashInfo {
+ pub init: InitCrashHandler,
+ pub panic: Option<CrashPanic>,
+}
+
+#[derive(Debug, Deserialize, Serialize, Clone)]
+pub struct InitCrashHandler {
+ pub session_id: String,
+ pub zed_version: String,
+ pub release_channel: String,
+ pub commit_sha: String,
+ // pub gpu: String,
+}
+
+#[derive(Deserialize, Serialize, Debug, Clone)]
+pub struct CrashPanic {
+ pub message: String,
+ pub span: String,
}
impl minidumper::ServerHandler for CrashServer {
fn create_minidump_file(&self) -> Result<(File, PathBuf), io::Error> {
- let err_message = "Need to send a message with the ID upon starting the crash handler";
+ let err_message = "Missing initialization data";
let dump_path = paths::logs_dir()
- .join(self.session_id.get().expect(err_message))
+ .join(
+ &self
+ .initialization_params
+ .get()
+ .expect(err_message)
+ .session_id,
+ )
.with_extension("dmp");
let file = File::create(&dump_path)?;
Ok((file, dump_path))
@@ -122,38 +152,71 @@ impl minidumper::ServerHandler for CrashServer {
info!("failed to write minidump: {:#}", e);
}
}
+
+ let crash_info = CrashInfo {
+ init: self
+ .initialization_params
+ .get()
+ .expect("not initialized")
+ .clone(),
+ panic: self.panic_info.get().cloned(),
+ };
+
+ let crash_data_path = paths::logs_dir()
+ .join(&crash_info.init.session_id)
+ .with_extension("json");
+
+ fs::write(crash_data_path, serde_json::to_vec(&crash_info).unwrap()).ok();
+
LoopAction::Exit
}
fn on_message(&self, kind: u32, buffer: Vec<u8>) {
- let message = String::from_utf8(buffer).expect("invalid utf-8");
- info!("kind: {kind}, message: {message}",);
- if kind == 1 {
- self.session_id
- .set(message)
- .expect("session id already initialized");
+ match kind {
+ 1 => {
+ let init_data =
+ serde_json::from_slice::<InitCrashHandler>(&buffer).expect("invalid init data");
+ self.initialization_params
+ .set(init_data)
+ .expect("already initialized");
+ }
+ 2 => {
+ let panic_data =
+ serde_json::from_slice::<CrashPanic>(&buffer).expect("invalid panic data");
+ self.panic_info.set(panic_data).expect("already panicked");
+ }
+ _ => {
+ panic!("invalid message kind");
+ }
}
}
- fn on_client_disconnected(&self, clients: usize) -> LoopAction {
- info!("client disconnected, {clients} remaining");
- if clients == 0 {
- LoopAction::Exit
- } else {
- LoopAction::Continue
- }
+ fn on_client_disconnected(&self, _clients: usize) -> LoopAction {
+ LoopAction::Exit
}
-}
-pub fn handle_panic() {
- if !*GENERATE_MINIDUMPS {
- return;
+ fn on_client_connected(&self, _clients: usize) -> LoopAction {
+ self.has_connection.store(true, Ordering::SeqCst);
+ LoopAction::Continue
}
+}
+
+pub fn handle_panic(message: String, span: Option<&Location>) {
+ let span = span
+ .map(|loc| format!("{}:{}", loc.file(), loc.line()))
+ .unwrap_or_default();
+
// wait 500ms for the crash handler process to start up
// if it's still not there just write panic info and no minidump
let retry_frequency = Duration::from_millis(100);
for _ in 0..5 {
- if CRASH_HANDLER.load(Ordering::Acquire) {
+ if let Some(client) = CRASH_HANDLER.get() {
+ client
+ .send_message(
+ 2,
+ serde_json::to_vec(&CrashPanic { message, span }).unwrap(),
+ )
+ .ok();
log::error!("triggering a crash to generate a minidump...");
#[cfg(target_os = "linux")]
CrashHandler.simulate_signal(crash_handler::Signal::Trap as u32);
@@ -170,14 +233,30 @@ pub fn crash_server(socket: &Path) {
log::info!("Couldn't create socket, there may already be a running crash server");
return;
};
- let ab = AtomicBool::new(false);
+
+ let shutdown = Arc::new(AtomicBool::new(false));
+ let has_connection = Arc::new(AtomicBool::new(false));
+
+ std::thread::spawn({
+ let shutdown = shutdown.clone();
+ let has_connection = has_connection.clone();
+ move || {
+ std::thread::sleep(CRASH_HANDLER_CONNECT_TIMEOUT);
+ if !has_connection.load(Ordering::SeqCst) {
+ shutdown.store(true, Ordering::SeqCst);
+ }
+ }
+ });
+
server
.run(
Box::new(CrashServer {
- session_id: OnceLock::new(),
+ initialization_params: OnceLock::new(),
+ panic_info: OnceLock::new(),
+ has_connection,
}),
- &ab,
- Some(CRASH_HANDLER_TIMEOUT),
+ &shutdown,
+ Some(CRASH_HANDLER_PING_TIMEOUT),
)
.expect("failed to run server");
}
@@ -28,11 +28,13 @@ message GetCrashFiles {
message GetCrashFilesResponse {
repeated CrashReport crashes = 1;
+ repeated string legacy_panics = 2;
}
message CrashReport {
- optional string panic_contents = 1;
- optional bytes minidump_contents = 2;
+ reserved 1, 2;
+ string metadata = 3;
+ bytes minidump_contents = 4;
}
message Extension {
@@ -1490,20 +1490,17 @@ impl RemoteConnection for SshRemoteConnection {
identifier = &unique_identifier,
);
- if let Some(rust_log) = std::env::var("RUST_LOG").ok() {
- start_proxy_command = format!(
- "RUST_LOG={} {}",
- shlex::try_quote(&rust_log).unwrap(),
- start_proxy_command
- )
- }
- if let Some(rust_backtrace) = std::env::var("RUST_BACKTRACE").ok() {
- start_proxy_command = format!(
- "RUST_BACKTRACE={} {}",
- shlex::try_quote(&rust_backtrace).unwrap(),
- start_proxy_command
- )
+ for env_var in ["RUST_LOG", "RUST_BACKTRACE", "ZED_GENERATE_MINIDUMPS"] {
+ if let Some(value) = std::env::var(env_var).ok() {
+ start_proxy_command = format!(
+ "{}={} {} ",
+ env_var,
+ shlex::try_quote(&value).unwrap(),
+ start_proxy_command,
+ );
+ }
}
+
if reconnect {
start_proxy_command.push_str(" --reconnect");
}
@@ -2241,8 +2238,7 @@ impl SshRemoteConnection {
#[cfg(not(target_os = "windows"))]
{
- run_cmd(Command::new("gzip").args(["-9", "-f", &bin_path.to_string_lossy()]))
- .await?;
+ run_cmd(Command::new("gzip").args(["-f", &bin_path.to_string_lossy()])).await?;
}
#[cfg(target_os = "windows")]
{
@@ -2474,7 +2470,7 @@ impl ChannelClient {
},
async {
smol::Timer::after(timeout).await;
- anyhow::bail!("Timeout detected")
+ anyhow::bail!("Timed out resyncing remote client")
},
)
.await
@@ -2488,7 +2484,7 @@ impl ChannelClient {
},
async {
smol::Timer::after(timeout).await;
- anyhow::bail!("Timeout detected")
+ anyhow::bail!("Timed out pinging remote client")
},
)
.await
@@ -34,10 +34,10 @@ use smol::io::AsyncReadExt;
use smol::Async;
use smol::{net::unix::UnixListener, stream::StreamExt as _};
-use std::collections::HashMap;
use std::ffi::OsStr;
use std::ops::ControlFlow;
use std::str::FromStr;
+use std::sync::LazyLock;
use std::{env, thread};
use std::{
io::Write,
@@ -48,6 +48,13 @@ use std::{
use telemetry_events::LocationData;
use util::ResultExt;
+pub static VERSION: LazyLock<&str> = LazyLock::new(|| match *RELEASE_CHANNEL {
+ ReleaseChannel::Stable | ReleaseChannel::Preview => env!("ZED_PKG_VERSION"),
+ ReleaseChannel::Nightly | ReleaseChannel::Dev => {
+ option_env!("ZED_COMMIT_SHA").unwrap_or("missing-zed-commit-sha")
+ }
+});
+
fn init_logging_proxy() {
env_logger::builder()
.format(|buf, record| {
@@ -113,7 +120,6 @@ fn init_logging_server(log_file_path: PathBuf) -> Result<Receiver<Vec<u8>>> {
fn init_panic_hook(session_id: String) {
std::panic::set_hook(Box::new(move |info| {
- crashes::handle_panic();
let payload = info
.payload()
.downcast_ref::<&str>()
@@ -121,6 +127,8 @@ fn init_panic_hook(session_id: String) {
.or_else(|| info.payload().downcast_ref::<String>().cloned())
.unwrap_or_else(|| "Box<Any>".to_string());
+ crashes::handle_panic(payload.clone(), info.location());
+
let backtrace = backtrace::Backtrace::new();
let mut backtrace = backtrace
.frames()
@@ -150,14 +158,6 @@ fn init_panic_hook(session_id: String) {
(&backtrace).join("\n")
);
- let release_channel = *RELEASE_CHANNEL;
- let version = match release_channel {
- ReleaseChannel::Stable | ReleaseChannel::Preview => env!("ZED_PKG_VERSION"),
- ReleaseChannel::Nightly | ReleaseChannel::Dev => {
- option_env!("ZED_COMMIT_SHA").unwrap_or("missing-zed-commit-sha")
- }
- };
-
let panic_data = telemetry_events::Panic {
thread: thread_name.into(),
payload: payload.clone(),
@@ -165,9 +165,9 @@ fn init_panic_hook(session_id: String) {
file: location.file().into(),
line: location.line(),
}),
- app_version: format!("remote-server-{version}"),
+ app_version: format!("remote-server-{}", *VERSION),
app_commit_sha: option_env!("ZED_COMMIT_SHA").map(|sha| sha.into()),
- release_channel: release_channel.dev_name().into(),
+ release_channel: RELEASE_CHANNEL.dev_name().into(),
target: env!("TARGET").to_owned().into(),
os_name: telemetry::os_name(),
os_version: Some(telemetry::os_version()),
@@ -204,8 +204,8 @@ fn handle_crash_files_requests(project: &Entity<HeadlessProject>, client: &Arc<C
client.add_request_handler(
project.downgrade(),
|_, _: TypedEnvelope<proto::GetCrashFiles>, _cx| async move {
+ let mut legacy_panics = Vec::new();
let mut crashes = Vec::new();
- let mut minidumps_by_session_id = HashMap::new();
let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
while let Some(child) = children.next().await {
let child = child?;
@@ -227,41 +227,31 @@ fn handle_crash_files_requests(project: &Entity<HeadlessProject>, client: &Arc<C
.await
.context("error reading panic file")?;
- crashes.push(proto::CrashReport {
- panic_contents: Some(file_contents),
- minidump_contents: None,
- });
+ legacy_panics.push(file_contents);
+ smol::fs::remove_file(&child_path)
+ .await
+ .context("error removing panic")
+ .log_err();
} else if extension == Some(OsStr::new("dmp")) {
- let session_id = child_path.file_stem().unwrap().to_string_lossy();
- minidumps_by_session_id
- .insert(session_id.to_string(), smol::fs::read(&child_path).await?);
- }
-
- // We've done what we can, delete the file
- smol::fs::remove_file(&child_path)
- .await
- .context("error removing panic")
- .log_err();
- }
-
- for crash in &mut crashes {
- let panic: telemetry_events::Panic =
- serde_json::from_str(crash.panic_contents.as_ref().unwrap())?;
- if let dump @ Some(_) = minidumps_by_session_id.remove(&panic.session_id) {
- crash.minidump_contents = dump;
+ let mut json_path = child_path.clone();
+ json_path.set_extension("json");
+ if let Ok(json_content) = smol::fs::read_to_string(&json_path).await {
+ crashes.push(CrashReport {
+ metadata: json_content,
+ minidump_contents: smol::fs::read(&child_path).await?,
+ });
+ smol::fs::remove_file(&child_path).await.log_err();
+ smol::fs::remove_file(&json_path).await.log_err();
+ } else {
+ log::error!("Couldn't find json metadata for crash: {child_path:?}");
+ }
}
}
- crashes.extend(
- minidumps_by_session_id
- .into_values()
- .map(|dmp| CrashReport {
- panic_contents: None,
- minidump_contents: Some(dmp),
- }),
- );
-
- anyhow::Ok(proto::GetCrashFilesResponse { crashes })
+ anyhow::Ok(proto::GetCrashFilesResponse {
+ crashes,
+ legacy_panics,
+ })
},
);
}
@@ -442,7 +432,12 @@ pub fn execute_run(
let app = gpui::Application::headless();
let id = std::process::id().to_string();
app.background_executor()
- .spawn(crashes::init(id.clone()))
+ .spawn(crashes::init(crashes::InitCrashHandler {
+ session_id: id.clone(),
+ zed_version: VERSION.to_owned(),
+ release_channel: release_channel::RELEASE_CHANNEL_NAME.clone(),
+ commit_sha: option_env!("ZED_COMMIT_SHA").unwrap_or("no_sha").to_owned(),
+ }))
.detach();
init_panic_hook(id);
let log_rx = init_logging_server(log_file)?;
@@ -569,7 +564,13 @@ pub fn execute_proxy(identifier: String, is_reconnecting: bool) -> Result<()> {
let server_paths = ServerPaths::new(&identifier)?;
let id = std::process::id().to_string();
- smol::spawn(crashes::init(id.clone())).detach();
+ smol::spawn(crashes::init(crashes::InitCrashHandler {
+ session_id: id.clone(),
+ zed_version: VERSION.to_owned(),
+ release_channel: release_channel::RELEASE_CHANNEL_NAME.clone(),
+ commit_sha: option_env!("ZED_COMMIT_SHA").unwrap_or("no_sha").to_owned(),
+ }))
+ .detach();
init_panic_hook(id);
log::info!("starting proxy process. PID: {}", std::process::id());
@@ -8,6 +8,7 @@ use cli::FORCE_CLI_MODE_ENV_VAR_NAME;
use client::{Client, ProxySettings, UserStore, parse_zed_link};
use collab_ui::channel_view::ChannelView;
use collections::HashMap;
+use crashes::InitCrashHandler;
use db::kvp::{GLOBAL_KEY_VALUE_STORE, KEY_VALUE_STORE};
use editor::Editor;
use extension::ExtensionHostProxy;
@@ -269,7 +270,15 @@ pub fn main() {
let session = app.background_executor().block(Session::new());
app.background_executor()
- .spawn(crashes::init(session_id.clone()))
+ .spawn(crashes::init(InitCrashHandler {
+ session_id: session_id.clone(),
+ zed_version: app_version.to_string(),
+ release_channel: release_channel::RELEASE_CHANNEL_NAME.clone(),
+ commit_sha: app_commit_sha
+ .as_ref()
+ .map(|sha| sha.full())
+ .unwrap_or_else(|| "no sha".to_owned()),
+ }))
.detach();
reliability::init_panic_hook(
app_version,
@@ -12,6 +12,7 @@ use gpui::{App, AppContext as _, SemanticVersion};
use http_client::{self, HttpClient, HttpClientWithUrl, HttpRequestExt, Method};
use paths::{crashes_dir, crashes_retired_dir};
use project::Project;
+use proto::{CrashReport, GetCrashFilesResponse};
use release_channel::{AppCommitSha, RELEASE_CHANNEL, ReleaseChannel};
use reqwest::multipart::{Form, Part};
use settings::Settings;
@@ -51,10 +52,6 @@ pub fn init_panic_hook(
thread::yield_now();
}
}
- crashes::handle_panic();
-
- let thread = thread::current();
- let thread_name = thread.name().unwrap_or("<unnamed>");
let payload = info
.payload()
@@ -63,6 +60,11 @@ pub fn init_panic_hook(
.or_else(|| info.payload().downcast_ref::<String>().cloned())
.unwrap_or_else(|| "Box<Any>".to_string());
+ crashes::handle_panic(payload.clone(), info.location());
+
+ let thread = thread::current();
+ let thread_name = thread.name().unwrap_or("<unnamed>");
+
if *release_channel::RELEASE_CHANNEL == ReleaseChannel::Dev {
let location = info.location().unwrap();
let backtrace = Backtrace::new();
@@ -214,45 +216,53 @@ pub fn init(
let installation_id = installation_id.clone();
let system_id = system_id.clone();
- if let Some(ssh_client) = project.ssh_client() {
- ssh_client.update(cx, |client, cx| {
- if TelemetrySettings::get_global(cx).diagnostics {
- let request = client.proto_client().request(proto::GetCrashFiles {});
- cx.background_spawn(async move {
- let crash_files = request.await?;
- for crash in crash_files.crashes {
- let mut panic: Option<Panic> = crash
- .panic_contents
- .and_then(|s| serde_json::from_str(&s).log_err());
-
- if let Some(panic) = panic.as_mut() {
- panic.session_id = session_id.clone();
- panic.system_id = system_id.clone();
- panic.installation_id = installation_id.clone();
- }
-
- if let Some(minidump) = crash.minidump_contents {
- upload_minidump(
- http_client.clone(),
- minidump.clone(),
- panic.as_ref(),
- )
- .await
- .log_err();
- }
-
- if let Some(panic) = panic {
- upload_panic(&http_client, &panic_report_url, panic, &mut None)
- .await?;
- }
- }
+ let Some(ssh_client) = project.ssh_client() else {
+ return;
+ };
+ ssh_client.update(cx, |client, cx| {
+ if !TelemetrySettings::get_global(cx).diagnostics {
+ return;
+ }
+ let request = client.proto_client().request(proto::GetCrashFiles {});
+ cx.background_spawn(async move {
+ let GetCrashFilesResponse {
+ legacy_panics,
+ crashes,
+ } = request.await?;
+
+ for panic in legacy_panics {
+ if let Some(mut panic) = serde_json::from_str::<Panic>(&panic).log_err() {
+ panic.session_id = session_id.clone();
+ panic.system_id = system_id.clone();
+ panic.installation_id = installation_id.clone();
+ upload_panic(&http_client, &panic_report_url, panic, &mut None).await?;
+ }
+ }
- anyhow::Ok(())
- })
- .detach_and_log_err(cx);
+ let Some(endpoint) = MINIDUMP_ENDPOINT.as_ref() else {
+ return Ok(());
+ };
+ for CrashReport {
+ metadata,
+ minidump_contents,
+ } in crashes
+ {
+ if let Some(metadata) = serde_json::from_str(&metadata).log_err() {
+ upload_minidump(
+ http_client.clone(),
+ endpoint,
+ minidump_contents,
+ &metadata,
+ )
+ .await
+ .log_err();
+ }
}
+
+ anyhow::Ok(())
})
- }
+ .detach_and_log_err(cx);
+ })
})
.detach();
}
@@ -466,16 +476,18 @@ fn upload_panics_and_crashes(
installation_id: Option<String>,
cx: &App,
) {
- let telemetry_settings = *client::TelemetrySettings::get_global(cx);
+ if !client::TelemetrySettings::get_global(cx).diagnostics {
+ return;
+ }
cx.background_spawn(async move {
- let most_recent_panic =
- upload_previous_panics(http.clone(), &panic_report_url, telemetry_settings)
- .await
- .log_err()
- .flatten();
- upload_previous_crashes(http, most_recent_panic, installation_id, telemetry_settings)
+ upload_previous_minidumps(http.clone()).await.warn_on_err();
+ let most_recent_panic = upload_previous_panics(http.clone(), &panic_report_url)
.await
.log_err()
+ .flatten();
+ upload_previous_crashes(http, most_recent_panic, installation_id)
+ .await
+ .log_err();
})
.detach()
}
@@ -484,7 +496,6 @@ fn upload_panics_and_crashes(
async fn upload_previous_panics(
http: Arc<HttpClientWithUrl>,
panic_report_url: &Url,
- telemetry_settings: client::TelemetrySettings,
) -> anyhow::Result<Option<(i64, String)>> {
let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
@@ -507,58 +518,41 @@ async fn upload_previous_panics(
continue;
}
- if telemetry_settings.diagnostics {
- let panic_file_content = smol::fs::read_to_string(&child_path)
- .await
- .context("error reading panic file")?;
-
- let panic: Option<Panic> = serde_json::from_str(&panic_file_content)
- .log_err()
- .or_else(|| {
- panic_file_content
- .lines()
- .next()
- .and_then(|line| serde_json::from_str(line).ok())
- })
- .unwrap_or_else(|| {
- log::error!("failed to deserialize panic file {:?}", panic_file_content);
- None
- });
-
- if let Some(panic) = panic {
- let minidump_path = paths::logs_dir()
- .join(&panic.session_id)
- .with_extension("dmp");
- if minidump_path.exists() {
- let minidump = smol::fs::read(&minidump_path)
- .await
- .context("Failed to read minidump")?;
- if upload_minidump(http.clone(), minidump, Some(&panic))
- .await
- .log_err()
- .is_some()
- {
- fs::remove_file(minidump_path).ok();
- }
- }
+ let panic_file_content = smol::fs::read_to_string(&child_path)
+ .await
+ .context("error reading panic file")?;
- if !upload_panic(&http, &panic_report_url, panic, &mut most_recent_panic).await? {
- continue;
- }
- }
- }
+ let panic: Option<Panic> = serde_json::from_str(&panic_file_content)
+ .log_err()
+ .or_else(|| {
+ panic_file_content
+ .lines()
+ .next()
+ .and_then(|line| serde_json::from_str(line).ok())
+ })
+ .unwrap_or_else(|| {
+ log::error!("failed to deserialize panic file {:?}", panic_file_content);
+ None
+ });
- // We've done what we can, delete the file
- fs::remove_file(child_path)
- .context("error removing panic")
- .log_err();
+ if let Some(panic) = panic
+ && upload_panic(&http, &panic_report_url, panic, &mut most_recent_panic).await?
+ {
+ // We've done what we can, delete the file
+ fs::remove_file(child_path)
+ .context("error removing panic")
+ .log_err();
+ }
}
- if MINIDUMP_ENDPOINT.is_none() {
- return Ok(most_recent_panic);
- }
+ Ok(most_recent_panic)
+}
+
+pub async fn upload_previous_minidumps(http: Arc<HttpClientWithUrl>) -> anyhow::Result<()> {
+ let Some(minidump_endpoint) = MINIDUMP_ENDPOINT.as_ref() else {
+ return Err(anyhow::anyhow!("Minidump endpoint not set"));
+ };
- // loop back over the directory again to upload any minidumps that are missing panics
let mut children = smol::fs::read_dir(paths::logs_dir()).await?;
while let Some(child) = children.next().await {
let child = child?;
@@ -566,33 +560,35 @@ async fn upload_previous_panics(
if child_path.extension() != Some(OsStr::new("dmp")) {
continue;
}
- if upload_minidump(
- http.clone(),
- smol::fs::read(&child_path)
- .await
- .context("Failed to read minidump")?,
- None,
- )
- .await
- .log_err()
- .is_some()
- {
- fs::remove_file(child_path).ok();
+ let mut json_path = child_path.clone();
+ json_path.set_extension("json");
+ if let Ok(metadata) = serde_json::from_slice(&smol::fs::read(&json_path).await?) {
+ if upload_minidump(
+ http.clone(),
+ &minidump_endpoint,
+ smol::fs::read(&child_path)
+ .await
+ .context("Failed to read minidump")?,
+ &metadata,
+ )
+ .await
+ .log_err()
+ .is_some()
+ {
+ fs::remove_file(child_path).ok();
+ fs::remove_file(json_path).ok();
+ }
}
}
-
- Ok(most_recent_panic)
+ Ok(())
}
async fn upload_minidump(
http: Arc<HttpClientWithUrl>,
+ endpoint: &str,
minidump: Vec<u8>,
- panic: Option<&Panic>,
+ metadata: &crashes::CrashInfo,
) -> Result<()> {
- let minidump_endpoint = MINIDUMP_ENDPOINT
- .to_owned()
- .ok_or_else(|| anyhow::anyhow!("Minidump endpoint not set"))?;
-
let mut form = Form::new()
.part(
"upload_file_minidump",
@@ -600,38 +596,22 @@ async fn upload_minidump(
.file_name("minidump.dmp")
.mime_str("application/octet-stream")?,
)
+ .text(
+ "sentry[tags][channel]",
+ metadata.init.release_channel.clone(),
+ )
+ .text("sentry[tags][version]", metadata.init.zed_version.clone())
+ .text("sentry[release]", metadata.init.commit_sha.clone())
.text("platform", "rust");
- if let Some(panic) = panic {
- form = form
- .text("sentry[tags][channel]", panic.release_channel.clone())
- .text("sentry[tags][version]", panic.app_version.clone())
- .text("sentry[context][os][name]", panic.os_name.clone())
- .text(
- "sentry[context][device][architecture]",
- panic.architecture.clone(),
- )
- .text("sentry[logentry][formatted]", panic.payload.clone());
-
- if let Some(sha) = panic.app_commit_sha.clone() {
- form = form.text("sentry[release]", sha)
- } else {
- form = form.text(
- "sentry[release]",
- format!("{}-{}", panic.release_channel, panic.app_version),
- )
- }
- if let Some(v) = panic.os_version.clone() {
- form = form.text("sentry[context][os][release]", v);
- }
- if let Some(location) = panic.location_data.as_ref() {
- form = form.text("span", format!("{}:{}", location.file, location.line))
- }
+ if let Some(panic_info) = metadata.panic.as_ref() {
+ form = form.text("sentry[logentry][formatted]", panic_info.message.clone());
+ form = form.text("span", panic_info.span.clone());
// TODO: add gpu-context, feature-flag-context, and more of device-context like gpu
// name, screen resolution, available ram, device model, etc
}
let mut response_text = String::new();
- let mut response = http.send_multipart_form(&minidump_endpoint, form).await?;
+ let mut response = http.send_multipart_form(endpoint, form).await?;
response
.body_mut()
.read_to_string(&mut response_text)
@@ -681,11 +661,7 @@ async fn upload_previous_crashes(
http: Arc<HttpClientWithUrl>,
most_recent_panic: Option<(i64, String)>,
installation_id: Option<String>,
- telemetry_settings: client::TelemetrySettings,
) -> Result<()> {
- if !telemetry_settings.diagnostics {
- return Ok(());
- }
let last_uploaded = KEY_VALUE_STORE
.read_kvp(LAST_CRASH_UPLOADED)?
.unwrap_or("zed-2024-01-17-221900.ips".to_string()); // don't upload old crash reports from before we had this.