From 59bdbf5a5dfc465f6327da4048d72f6536fdd841 Mon Sep 17 00:00:00 2001 From: Nia Date: Sat, 6 Sep 2025 00:27:14 +0200 Subject: [PATCH] Various fixups to unsafe code (#37651) A collection of fixups of possibly-unsound code and removing some small useless writes. Release Notes: - N/A --- crates/fs/src/fs.rs | 15 ++- crates/gpui/src/arena.rs | 27 +++--- crates/gpui/src/util.rs | 6 +- crates/sqlez/src/connection.rs | 170 +++++++++++++++++---------------- crates/sqlez/src/statement.rs | 78 ++++++++------- crates/util/src/util.rs | 4 +- crates/zlog/src/filter.rs | 14 +-- crates/zlog/src/sink.rs | 38 ++++---- 8 files changed, 181 insertions(+), 171 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index a5cf9b88254deff5b9a07402207f19875827d7f0..98c8dc9054984c49732bec57a9604a14ceb5ee72 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -20,6 +20,9 @@ use std::os::fd::{AsFd, AsRawFd}; #[cfg(unix)] use std::os::unix::fs::{FileTypeExt, MetadataExt}; +#[cfg(any(target_os = "macos", target_os = "freebsd"))] +use std::mem::MaybeUninit; + use async_tar::Archive; use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture}; use git::repository::{GitRepository, RealGitRepository}; @@ -261,14 +264,15 @@ impl FileHandle for std::fs::File { }; let fd = self.as_fd(); - let mut path_buf: [libc::c_char; libc::PATH_MAX as usize] = [0; libc::PATH_MAX as usize]; + let mut path_buf = MaybeUninit::<[u8; libc::PATH_MAX as usize]>::uninit(); let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETPATH, path_buf.as_mut_ptr()) }; if result == -1 { anyhow::bail!("fcntl returned -1".to_string()); } - let c_str = unsafe { CStr::from_ptr(path_buf.as_ptr()) }; + // SAFETY: `fcntl` will initialize the path buffer. 
+ let c_str = unsafe { CStr::from_ptr(path_buf.as_ptr().cast()) }; let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes())); Ok(path) } @@ -296,15 +300,16 @@ impl FileHandle for std::fs::File { }; let fd = self.as_fd(); - let mut kif: libc::kinfo_file = unsafe { std::mem::zeroed() }; + let mut kif = MaybeUninit::<libc::kinfo_file>::uninit(); kif.kf_structsize = libc::KINFO_FILE_SIZE; - let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_KINFO, &mut kif) }; + let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_KINFO, kif.as_mut_ptr()) }; if result == -1 { anyhow::bail!("fcntl returned -1".to_string()); } - let c_str = unsafe { CStr::from_ptr(kif.kf_path.as_ptr()) }; + // SAFETY: `fcntl` will initialize the kif. + let c_str = unsafe { CStr::from_ptr(kif.assume_init().kf_path.as_ptr()) }; let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes())); Ok(path) } diff --git a/crates/gpui/src/arena.rs b/crates/gpui/src/arena.rs index 0983bd23454c9a3a921ed721ecd32561387f9049..a0d0c23987472de46d5b23129adb5a4ec8ee00cb 100644 --- a/crates/gpui/src/arena.rs +++ b/crates/gpui/src/arena.rs @@ -1,8 +1,9 @@ use std::{ alloc::{self, handle_alloc_error}, cell::Cell, + num::NonZeroUsize, ops::{Deref, DerefMut}, - ptr, + ptr::{self, NonNull}, rc::Rc, }; @@ -30,23 +31,23 @@ impl Drop for Chunk { fn drop(&mut self) { unsafe { let chunk_size = self.end.offset_from_unsigned(self.start); - // this never fails as it succeeded during allocation - let layout = alloc::Layout::from_size_align(chunk_size, 1).unwrap(); + // SAFETY: This succeeded during allocation. 
+ let layout = alloc::Layout::from_size_align_unchecked(chunk_size, 1); alloc::dealloc(self.start, layout); } } } impl Chunk { - fn new(chunk_size: usize) -> Self { + fn new(chunk_size: NonZeroUsize) -> Self { unsafe { // this only fails if chunk_size is unreasonably huge - let layout = alloc::Layout::from_size_align(chunk_size, 1).unwrap(); + let layout = alloc::Layout::from_size_align(chunk_size.get(), 1).unwrap(); let start = alloc::alloc(layout); if start.is_null() { handle_alloc_error(layout); } - let end = start.add(chunk_size); + let end = start.add(chunk_size.get()); Self { start, end, @@ -55,14 +56,14 @@ impl Chunk { } } - fn allocate(&mut self, layout: alloc::Layout) -> Option<*mut u8> { + fn allocate(&mut self, layout: alloc::Layout) -> Option<NonNull<u8>> { unsafe { let aligned = self.offset.add(self.offset.align_offset(layout.align())); let next = aligned.add(layout.size()); if next <= self.end { self.offset = next; - Some(aligned) + NonNull::new(aligned) } else { None } @@ -79,7 +80,7 @@ pub struct Arena { elements: Vec<ArenaElement>, valid: Rc<Cell<bool>>, current_chunk_index: usize, - chunk_size: usize, + chunk_size: NonZeroUsize, } impl Drop for Arena { @@ -90,7 +91,7 @@ impl Arena { pub fn new(chunk_size: usize) -> Self { - assert!(chunk_size > 0); + let chunk_size = NonZeroUsize::try_from(chunk_size).unwrap(); Self { chunks: vec![Chunk::new(chunk_size)], elements: Vec::new(), @@ -101,7 +102,7 @@ } pub fn capacity(&self) -> usize { - self.chunks.len() * self.chunk_size + self.chunks.len() * self.chunk_size.get() } pub fn clear(&mut self) { @@ -136,7 +137,7 @@ let layout = alloc::Layout::new::<T>(); let mut current_chunk = &mut self.chunks[self.current_chunk_index]; let ptr = if let Some(ptr) = current_chunk.allocate(layout) { - ptr + ptr.as_ptr() } else { self.current_chunk_index += 1; if self.current_chunk_index >= self.chunks.len() { 
Some(ptr) = current_chunk.allocate(layout) { - ptr + ptr.as_ptr() } else { panic!( "Arena chunk_size of {} is too small to allocate {} bytes", diff --git a/crates/gpui/src/util.rs b/crates/gpui/src/util.rs index 3d7fa06e6ca013ae38b1c63d1bfd624d46cdf4f1..3704784a954f14b8317202e227ffb1b17092d70d 100644 --- a/crates/gpui/src/util.rs +++ b/crates/gpui/src/util.rs @@ -99,9 +99,9 @@ impl Future for WithTimeout { fn poll(self: Pin<&mut Self>, cx: &mut task::Context) -> task::Poll<Self::Output> { // SAFETY: the fields of Timeout are private and we never move the future ourselves // And its already pinned since we are being polled (all futures need to be pinned to be polled) - let this = unsafe { self.get_unchecked_mut() }; - let future = unsafe { Pin::new_unchecked(&mut this.future) }; - let timer = unsafe { Pin::new_unchecked(&mut this.timer) }; + let this = unsafe { &raw mut *self.get_unchecked_mut() }; + let future = unsafe { Pin::new_unchecked(&mut (*this).future) }; + let timer = unsafe { Pin::new_unchecked(&mut (*this).timer) }; if let task::Poll::Ready(output) = future.poll(cx) { task::Poll::Ready(Ok(output)) diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 228bd4c6a2df31f41dc1988596fc87323063d78c..53f0d4e2614f340cc0563d5cd9374bdc3626d9bb 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -92,91 +92,97 @@ impl Connection { let mut remaining_sql = sql.as_c_str(); let sql_start = remaining_sql.as_ptr(); - unsafe { - let mut alter_table = None; - while { - let remaining_sql_str = remaining_sql.to_str().unwrap().trim(); - let any_remaining_sql = remaining_sql_str != ";" && !remaining_sql_str.is_empty(); - if any_remaining_sql { - alter_table = parse_alter_table(remaining_sql_str); + let mut alter_table = None; + while { + let remaining_sql_str = remaining_sql.to_str().unwrap().trim(); + let any_remaining_sql = remaining_sql_str != ";" && !remaining_sql_str.is_empty(); + if any_remaining_sql { + alter_table = 
parse_alter_table(remaining_sql_str); + } + any_remaining_sql + } { + let mut raw_statement = ptr::null_mut::<sqlite3_stmt>(); + let mut remaining_sql_ptr = ptr::null(); + + let (res, offset, message, _conn) = if let Some((table_to_alter, column)) = alter_table + { + // ALTER TABLE is a weird statement. When preparing the statement the table's + // existence is checked *before* syntax checking any other part of the statement. + // Therefore, we need to make sure that the table has been created before calling + // prepare. As we don't want to trash whatever database this is connected to, we + // create a new in-memory DB to test. + + let temp_connection = Connection::open_memory(None); + //This should always succeed, if it doesn't then you really should know about it + temp_connection + .exec(&format!("CREATE TABLE {table_to_alter}({column})")) + .unwrap()() + .unwrap(); + + unsafe { + sqlite3_prepare_v2( + temp_connection.sqlite3, + remaining_sql.as_ptr(), + -1, + &mut raw_statement, + &mut remaining_sql_ptr, + ) + }; + + #[cfg(not(any(target_os = "linux", target_os = "freebsd")))] + let offset = unsafe { sqlite3_error_offset(temp_connection.sqlite3) }; + + #[cfg(any(target_os = "linux", target_os = "freebsd"))] + let offset = 0; + + unsafe { + ( + sqlite3_errcode(temp_connection.sqlite3), + offset, + sqlite3_errmsg(temp_connection.sqlite3), + Some(temp_connection), + ) } - any_remaining_sql - } { - let mut raw_statement = ptr::null_mut::<sqlite3_stmt>(); - let mut remaining_sql_ptr = ptr::null(); - - let (res, offset, message, _conn) = - if let Some((table_to_alter, column)) = alter_table { - // ALTER TABLE is a weird statement. When preparing the statement the table's - // existence is checked *before* syntax checking any other part of the statement. - // Therefore, we need to make sure that the table has been created before calling - // prepare. As we don't want to trash whatever database this is connected to, we - // create a new in-memory DB to test. 
- - let temp_connection = Connection::open_memory(None); - //This should always succeed, if it doesn't then you really should know about it - temp_connection - .exec(&format!("CREATE TABLE {table_to_alter}({column})")) - .unwrap()() - .unwrap(); - - sqlite3_prepare_v2( - temp_connection.sqlite3, - remaining_sql.as_ptr(), - -1, - &mut raw_statement, - &mut remaining_sql_ptr, - ); - - #[cfg(not(any(target_os = "linux", target_os = "freebsd")))] - let offset = sqlite3_error_offset(temp_connection.sqlite3); - - #[cfg(any(target_os = "linux", target_os = "freebsd"))] - let offset = 0; - - ( - sqlite3_errcode(temp_connection.sqlite3), - offset, - sqlite3_errmsg(temp_connection.sqlite3), - Some(temp_connection), - ) - } else { - sqlite3_prepare_v2( - self.sqlite3, - remaining_sql.as_ptr(), - -1, - &mut raw_statement, - &mut remaining_sql_ptr, - ); - - #[cfg(not(any(target_os = "linux", target_os = "freebsd")))] - let offset = sqlite3_error_offset(self.sqlite3); - - #[cfg(any(target_os = "linux", target_os = "freebsd"))] - let offset = 0; - - ( - sqlite3_errcode(self.sqlite3), - offset, - sqlite3_errmsg(self.sqlite3), - None, - ) - }; - - sqlite3_finalize(raw_statement); - - if res == 1 && offset >= 0 { - let sub_statement_correction = - remaining_sql.as_ptr() as usize - sql_start as usize; - let err_msg = - String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) - .into_owned(); - - return Some((err_msg, offset as usize + sub_statement_correction)); + } else { + unsafe { + sqlite3_prepare_v2( + self.sqlite3, + remaining_sql.as_ptr(), + -1, + &mut raw_statement, + &mut remaining_sql_ptr, + ) + }; + + #[cfg(not(any(target_os = "linux", target_os = "freebsd")))] + let offset = unsafe { sqlite3_error_offset(self.sqlite3) }; + + #[cfg(any(target_os = "linux", target_os = "freebsd"))] + let offset = 0; + + unsafe { + ( + sqlite3_errcode(self.sqlite3), + offset, + sqlite3_errmsg(self.sqlite3), + None, + ) } - remaining_sql = CStr::from_ptr(remaining_sql_ptr); - 
alter_table = None; + }; + + unsafe { sqlite3_finalize(raw_statement) }; + + if res == 1 && offset >= 0 { + let sub_statement_correction = remaining_sql.as_ptr() as usize - sql_start as usize; + let err_msg = String::from_utf8_lossy(unsafe { + CStr::from_ptr(message as *const _).to_bytes() + }) + .into_owned(); + + return Some((err_msg, offset as usize + sub_statement_correction)); } + remaining_sql = unsafe { CStr::from_ptr(remaining_sql_ptr) }; + alter_table = None; } None } } diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index eb7553f862b0a291bf08345606ff22317d3eec60..d08e58a6f93344d4bb52c35c8c76406724a230b4 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -44,41 +44,41 @@ impl<'a> Statement<'a> { connection, phantom: PhantomData, }; - unsafe { - let sql = CString::new(query.as_ref()).context("Error creating cstr")?; - let mut remaining_sql = sql.as_c_str(); - while { - let remaining_sql_str = remaining_sql - .to_str() - .context("Parsing remaining sql")? - .trim(); - remaining_sql_str != ";" && !remaining_sql_str.is_empty() - } { - let mut raw_statement = ptr::null_mut::<sqlite3_stmt>(); - let mut remaining_sql_ptr = ptr::null(); + let sql = CString::new(query.as_ref()).context("Error creating cstr")?; + let mut remaining_sql = sql.as_c_str(); + while { + let remaining_sql_str = remaining_sql + .to_str() + .context("Parsing remaining sql")? 
+ .trim(); + remaining_sql_str != ";" && !remaining_sql_str.is_empty() + } { + let mut raw_statement = ptr::null_mut::<sqlite3_stmt>(); + let mut remaining_sql_ptr = ptr::null(); + unsafe { sqlite3_prepare_v2( connection.sqlite3, remaining_sql.as_ptr(), -1, &mut raw_statement, &mut remaining_sql_ptr, - ); + ) + }; - connection.last_error().with_context(|| { format!("Prepare call failed for query:\n{}", query.as_ref()) })?; + connection + .last_error() + .with_context(|| format!("Prepare call failed for query:\n{}", query.as_ref()))?; - remaining_sql = CStr::from_ptr(remaining_sql_ptr); - statement.raw_statements.push(raw_statement); + remaining_sql = unsafe { CStr::from_ptr(remaining_sql_ptr) }; + statement.raw_statements.push(raw_statement); - if !connection.can_write() && sqlite3_stmt_readonly(raw_statement) == 0 { - let sql = CStr::from_ptr(sqlite3_sql(raw_statement)); + if !connection.can_write() && unsafe { sqlite3_stmt_readonly(raw_statement) == 0 } { + let sql = unsafe { CStr::from_ptr(sqlite3_sql(raw_statement)) }; - bail!( - "Write statement prepared with connection that is not write capable. SQL:\n{} ", - sql.to_str()? - ) - } + bail!( + "Write statement prepared with connection that is not write capable. SQL:\n{} ", + sql.to_str()? 
+ ) } } @@ -271,23 +271,21 @@ impl<'a> Statement<'a> { } fn step(&mut self) -> Result<StepResult> { - unsafe { - match sqlite3_step(self.current_statement()) { - SQLITE_ROW => Ok(StepResult::Row), - SQLITE_DONE => { - if self.current_statement >= self.raw_statements.len() - 1 { - Ok(StepResult::Done) - } else { - self.current_statement += 1; - self.step() - } - } - SQLITE_MISUSE => anyhow::bail!("Statement step returned SQLITE_MISUSE"), - _other_error => { - self.connection.last_error()?; - unreachable!("Step returned error code and last error failed to catch it"); + match unsafe { sqlite3_step(self.current_statement()) } { + SQLITE_ROW => Ok(StepResult::Row), + SQLITE_DONE => { + if self.current_statement >= self.raw_statements.len() - 1 { + Ok(StepResult::Done) + } else { + self.current_statement += 1; + self.step() + } } + SQLITE_MISUSE => anyhow::bail!("Statement step returned SQLITE_MISUSE"), + _other_error => { + self.connection.last_error()?; + unreachable!("Step returned error code and last error failed to catch it"); + } } } } diff --git a/crates/util/src/util.rs b/crates/util/src/util.rs index db44e3945186842990f7ef8d7b2794b023324d56..90f5be1c92875ac0b9b2d3e7352ae858371b3686 100644 --- a/crates/util/src/util.rs +++ b/crates/util/src/util.rs @@ -256,6 +256,9 @@ fn load_shell_from_passwd() -> Result<()> { &mut result, ) }; + anyhow::ensure!(!result.is_null(), "passwd entry for uid {} not found", uid); + + // SAFETY: If `getpwuid_r` doesn't error, we have the entry here. 
let entry = unsafe { pwd.assume_init() }; anyhow::ensure!( @@ -264,7 +267,6 @@ fn load_shell_from_passwd() -> Result<()> { uid, status ); - anyhow::ensure!(!result.is_null(), "passwd entry for uid {} not found", uid); anyhow::ensure!( entry.pw_uid == uid, "passwd entry has different uid ({}) than getuid ({}) returned", diff --git a/crates/zlog/src/filter.rs b/crates/zlog/src/filter.rs index ee3c2410798b286795b9fd78b89502a2a7894987..31a58894774e6c0d08ea22b585350eb26ff09907 100644 --- a/crates/zlog/src/filter.rs +++ b/crates/zlog/src/filter.rs @@ -22,7 +22,7 @@ pub const LEVEL_ENABLED_MAX_DEFAULT: log::LevelFilter = log::LevelFilter::Info; /// crate that the max level is everything, so that we can dynamically enable /// logs that are more verbose than this level without the `log` crate throwing /// them away before we see them -static mut LEVEL_ENABLED_MAX_STATIC: log::LevelFilter = LEVEL_ENABLED_MAX_DEFAULT; +static LEVEL_ENABLED_MAX_STATIC: AtomicU8 = AtomicU8::new(LEVEL_ENABLED_MAX_DEFAULT as u8); /// A cache of the true maximum log level that _could_ be printed. 
This is based /// on the maximally verbose level that is configured by the user, and is used @@ -46,7 +46,7 @@ const DEFAULT_FILTERS: &[(&str, log::LevelFilter)] = &[ pub fn init_env_filter(filter: env_config::EnvFilter) { if let Some(level_max) = filter.level_global { - unsafe { LEVEL_ENABLED_MAX_STATIC = level_max } + LEVEL_ENABLED_MAX_STATIC.store(level_max as u8, Ordering::Release) } if ENV_FILTER.set(filter).is_err() { panic!("Environment filter cannot be initialized twice"); @@ -54,7 +54,7 @@ } pub fn is_possibly_enabled_level(level: log::Level) -> bool { - level as u8 <= LEVEL_ENABLED_MAX_CONFIG.load(Ordering::Relaxed) + level as u8 <= LEVEL_ENABLED_MAX_CONFIG.load(Ordering::Acquire) } pub fn is_scope_enabled(scope: &Scope, module_path: Option<&str>, level: log::Level) -> bool { @@ -66,7 +66,7 @@ // scope map return false; } - let is_enabled_by_default = level <= unsafe { LEVEL_ENABLED_MAX_STATIC }; + let is_enabled_by_default = level as u8 <= LEVEL_ENABLED_MAX_STATIC.load(Ordering::Acquire); let global_scope_map = SCOPE_MAP.read().unwrap_or_else(|err| { SCOPE_MAP.clear_poison(); err.into_inner() }); @@ -92,13 +92,13 @@ pub fn is_scope_enabled(scope: &Scope, module_path: Option<&str>, level: log::Le pub fn refresh_from_settings(settings: &HashMap<String, String>) { let env_config = ENV_FILTER.get(); let map_new = ScopeMap::new_from_settings_and_env(settings, env_config, DEFAULT_FILTERS); - let mut level_enabled_max = unsafe { LEVEL_ENABLED_MAX_STATIC }; + let mut level_enabled_max = LEVEL_ENABLED_MAX_STATIC.load(Ordering::Acquire); for entry in &map_new.entries { if let Some(level) = entry.enabled { - level_enabled_max = level_enabled_max.max(level); + level_enabled_max = level_enabled_max.max(level as u8); } } - LEVEL_ENABLED_MAX_CONFIG.store(level_enabled_max as u8, Ordering::Release); + LEVEL_ENABLED_MAX_CONFIG.store(level_enabled_max, 
Ordering::Release); { let mut global_map = SCOPE_MAP.write().unwrap_or_else(|err| { diff --git a/crates/zlog/src/sink.rs b/crates/zlog/src/sink.rs index 3ac85d4bbfc8aaa5d8568cb14b50e04a94708f1c..afbdf37bf9c74860a3b56b706ffc6d64338fd275 100644 --- a/crates/zlog/src/sink.rs +++ b/crates/zlog/src/sink.rs @@ -4,7 +4,7 @@ use std::{ path::PathBuf, sync::{ Mutex, OnceLock, - atomic::{AtomicU64, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, }, }; @@ -19,17 +19,17 @@ const ANSI_GREEN: &str = "\x1b[32m"; const ANSI_BLUE: &str = "\x1b[34m"; const ANSI_MAGENTA: &str = "\x1b[35m"; -/// Whether stdout output is enabled. -static mut ENABLED_SINKS_STDOUT: bool = false; -/// Whether stderr output is enabled. -static mut ENABLED_SINKS_STDERR: bool = false; - /// Is Some(file) if file output is enabled. static ENABLED_SINKS_FILE: Mutex<Option<File>> = Mutex::new(None); static SINK_FILE_PATH: OnceLock<&'static PathBuf> = OnceLock::new(); static SINK_FILE_PATH_ROTATE: OnceLock<&'static PathBuf> = OnceLock::new(); + +// NB: Since this can be accessed in tests, we probably should stick to atomics here. +/// Whether stdout output is enabled. +static ENABLED_SINKS_STDOUT: AtomicBool = AtomicBool::new(false); +/// Whether stderr output is enabled. +static ENABLED_SINKS_STDERR: AtomicBool = AtomicBool::new(false); /// Atomic counter for the size of the log file in bytes. -// TODO: make non-atomic if writing single threaded static SINK_FILE_SIZE_BYTES: AtomicU64 = AtomicU64::new(0); /// Maximum size of the log file before it will be rotated, in bytes. const SINK_FILE_SIZE_BYTES_MAX: u64 = 1024 * 1024; // 1 MB @@ -42,15 +42,13 @@ pub struct Record<'a> { } pub fn init_output_stdout() { - unsafe { - ENABLED_SINKS_STDOUT = true; - } + // Use atomics here instead of just a `static mut`, since in the context + // of tests these accesses can be multi-threaded. 
+ ENABLED_SINKS_STDOUT.store(true, Ordering::Release); } pub fn init_output_stderr() { - unsafe { - ENABLED_SINKS_STDERR = true; - } + ENABLED_SINKS_STDERR.store(true, Ordering::Release); } pub fn init_output_file( @@ -79,7 +77,7 @@ pub fn init_output_file( if size_bytes >= SINK_FILE_SIZE_BYTES_MAX { rotate_log_file(&mut file, Some(path), path_rotate, &SINK_FILE_SIZE_BYTES); } else { - SINK_FILE_SIZE_BYTES.store(size_bytes, Ordering::Relaxed); + SINK_FILE_SIZE_BYTES.store(size_bytes, Ordering::Release); } *enabled_sinks_file = Some(file); @@ -108,7 +106,7 @@ static LEVEL_ANSI_COLORS: [&str; 6] = [ // PERF: batching pub fn submit(record: Record) { - if unsafe { ENABLED_SINKS_STDOUT } { + if ENABLED_SINKS_STDOUT.load(Ordering::Acquire) { let mut stdout = std::io::stdout().lock(); _ = writeln!( &mut stdout, @@ -123,7 +121,7 @@ pub fn submit(record: Record) { }, record.message ); - } else if unsafe { ENABLED_SINKS_STDERR } { + } else if ENABLED_SINKS_STDERR.load(Ordering::Acquire) { let mut stdout = std::io::stderr().lock(); _ = writeln!( &mut stdout, @@ -173,7 +171,7 @@ pub fn submit(record: Record) { }, record.message ); - SINK_FILE_SIZE_BYTES.fetch_add(writer.written, Ordering::Relaxed) + writer.written + SINK_FILE_SIZE_BYTES.fetch_add(writer.written, Ordering::AcqRel) + writer.written }; if file_size_bytes > SINK_FILE_SIZE_BYTES_MAX { rotate_log_file( @@ -187,7 +185,7 @@ pub fn submit(record: Record) { } pub fn flush() { - if unsafe { ENABLED_SINKS_STDOUT } { + if ENABLED_SINKS_STDOUT.load(Ordering::Acquire) { _ = std::io::stdout().lock().flush(); } let mut file = ENABLED_SINKS_FILE.lock().unwrap_or_else(|handle| { @@ -265,7 +263,7 @@ fn rotate_log_file( // according to the documentation, it only fails if: // - the file is not writeable: should never happen, // - the size would cause an overflow (implementation specific): 0 should never cause an overflow - atomic_size.store(0, Ordering::Relaxed); + atomic_size.store(0, Ordering::Release); } #[cfg(test)] @@ -298,7 
+296,7 @@ mod tests { std::fs::read_to_string(&rotation_log_file_path).unwrap(), contents, ); - assert_eq!(size.load(Ordering::Relaxed), 0); + assert_eq!(size.load(Ordering::Acquire), 0); } /// Regression test, ensuring that if log level values change we are made aware