1pub mod arc_cow;
2pub mod archive;
3pub mod command;
4pub mod fs;
5pub mod markdown;
6pub mod paths;
7pub mod serde;
8pub mod size;
9#[cfg(any(test, feature = "test-support"))]
10pub mod test;
11pub mod time;
12
13use anyhow::Result;
14use futures::Future;
15use itertools::Either;
16use regex::Regex;
17use std::num::NonZeroU32;
18use std::sync::{LazyLock, OnceLock};
19use std::{
20 borrow::Cow,
21 cmp::{self, Ordering},
22 env,
23 ops::{AddAssign, Range, RangeInclusive},
24 panic::Location,
25 pin::Pin,
26 task::{Context, Poll},
27 time::Instant,
28};
29use unicase::UniCase;
30
31#[cfg(unix)]
32use anyhow::Context as _;
33
34pub use take_until::*;
35#[cfg(any(test, feature = "test-support"))]
36pub use util_macros::{line_endings, separator, uri};
37
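/// Panics with the formatted message in debug builds; in release builds it logs the message
/// and a captured backtrace at error level instead of panicking.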
38#[macro_export]
39macro_rules! debug_panic {
40 ( $($fmt_arg:tt)* ) => {
41 if cfg!(debug_assertions) {
42 panic!( $($fmt_arg)* );
43 } else {
44 let backtrace = std::backtrace::Backtrace::capture();
45 log::error!("{}\n{:?}", format_args!($($fmt_arg)*), backtrace);
46 }
47 };
48}
49
/// A macro that, on Windows, prepends "C:" to a path literal and replaces every `/`
/// separator with `\`. On non-Windows platforms, the path literal is returned as is.
53///
54/// # Examples
55/// ```rust
56/// use util::path;
57///
58/// let path = path!("/Users/user/file.txt");
59/// #[cfg(target_os = "windows")]
60/// assert_eq!(path, "C:\\Users\\user\\file.txt");
61/// #[cfg(not(target_os = "windows"))]
62/// assert_eq!(path, "/Users/user/file.txt");
63/// ```
64#[cfg(all(any(test, feature = "test-support"), target_os = "windows"))]
65#[macro_export]
66macro_rules! path {
67 ($path:literal) => {
68 concat!("C:", util::separator!($path))
69 };
70}
71
/// A macro that, on Windows, prepends "C:" to a path literal and replaces every `/`
/// separator with `\`. On non-Windows platforms, the path literal is returned as is.
75///
76/// # Examples
77/// ```rust
78/// use util::path;
79///
80/// let path = path!("/Users/user/file.txt");
81/// #[cfg(target_os = "windows")]
82/// assert_eq!(path, "C:\\Users\\user\\file.txt");
83/// #[cfg(not(target_os = "windows"))]
84/// assert_eq!(path, "/Users/user/file.txt");
85/// ```
86#[cfg(all(any(test, feature = "test-support"), not(target_os = "windows")))]
87#[macro_export]
88macro_rules! path {
89 ($path:literal) => {
90 $path
91 };
92}
93
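/// Truncates the string to at most `max_chars` characters, cutting on a character boundary.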
94pub fn truncate(s: &str, max_chars: usize) -> &str {
95 match s.char_indices().nth(max_chars) {
96 None => s,
97 Some((idx, _)) => &s[..idx],
98 }
99}
100
/// Removes characters from the end of the string if its character count is greater than
/// `max_chars` and appends "…". Returns the string unchanged otherwise.
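///
/// # Examples
/// ```rust
/// // Illustrative usage (the trailing ellipsis is the single character '…'):
/// assert_eq!(util::truncate_and_trailoff("hello world", 8), "hello wo…");
/// assert_eq!(util::truncate_and_trailoff("hello", 8), "hello");
/// ```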
103pub fn truncate_and_trailoff(s: &str, max_chars: usize) -> String {
104 debug_assert!(max_chars >= 5);
105
106 // If the string's byte length is <= max_chars, walking the string can be skipped since the
107 // number of chars is <= the number of bytes.
108 if s.len() <= max_chars {
109 return s.to_string();
110 }
111 let truncation_ix = s.char_indices().map(|(i, _)| i).nth(max_chars);
112 match truncation_ix {
113 Some(index) => s[..index].to_string() + "…",
114 _ => s.to_string(),
115 }
116}
117
/// Removes characters from the front of the string if its character count is greater than
/// `max_chars` and prepends "…". Returns the string unchanged otherwise.
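///
/// # Examples
/// ```rust
/// // Illustrative usage:
/// assert_eq!(util::truncate_and_remove_front("hello world", 8), "…lo world");
/// assert_eq!(util::truncate_and_remove_front("hello", 8), "hello");
/// ```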
120pub fn truncate_and_remove_front(s: &str, max_chars: usize) -> String {
121 debug_assert!(max_chars >= 5);
122
123 // If the string's byte length is <= max_chars, walking the string can be skipped since the
124 // number of chars is <= the number of bytes.
125 if s.len() <= max_chars {
126 return s.to_string();
127 }
128 let suffix_char_length = max_chars.saturating_sub(1);
129 let truncation_ix = s
130 .char_indices()
131 .map(|(i, _)| i)
132 .nth_back(suffix_char_length);
133 match truncation_ix {
134 Some(index) if index > 0 => "…".to_string() + &s[index..],
135 _ => s.to_string(),
136 }
137}
138
/// Takes up to `max_lines` lines from the string. If that many lines were taken, the last
/// one is replaced with "…", so that at most `max_lines` lines are returned.
/// Returns the string unchanged if it has fewer than `max_lines` lines.
142pub fn truncate_lines_and_trailoff(s: &str, max_lines: usize) -> String {
143 let mut lines = s.lines().take(max_lines).collect::<Vec<_>>();
144 if lines.len() > max_lines - 1 {
145 lines.pop();
146 lines.join("\n") + "\n…"
147 } else {
148 lines.join("\n")
149 }
150}
151
/// Truncates the string at a character boundary, such that the result is less than
/// `max_bytes` bytes in length.
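///
/// # Examples
/// ```rust
/// // Illustrative usage ('é' is two bytes, so the cut stays on a character boundary):
/// assert_eq!(util::truncate_to_byte_limit("héllo", 4), "hé");
/// assert_eq!(util::truncate_to_byte_limit("hi", 4), "hi");
/// ```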
154pub fn truncate_to_byte_limit(s: &str, max_bytes: usize) -> &str {
155 if s.len() < max_bytes {
156 return s;
157 }
158
159 for i in (0..max_bytes).rev() {
160 if s.is_char_boundary(i) {
161 return &s[..i];
162 }
163 }
164
165 ""
166}
167
168/// Takes a prefix of complete lines which fit within the byte limit. If the first line is longer
169/// than the limit, truncates at a character boundary.
170pub fn truncate_lines_to_byte_limit(s: &str, max_bytes: usize) -> &str {
171 if s.len() < max_bytes {
172 return s;
173 }
174
175 for i in (0..max_bytes).rev() {
176 if s.is_char_boundary(i) {
177 if s.as_bytes()[i] == b'\n' {
178 // Since the i-th character is \n, valid to slice at i + 1.
179 return &s[..i + 1];
180 }
181 }
182 }
183
184 truncate_to_byte_limit(s, max_bytes)
185}
186
187fn char_len_with_expanded_tabs(offset: usize, text: &str, tab_size: NonZeroU32) -> usize {
188 let tab_size = tab_size.get() as usize;
189 let mut width = offset;
190
191 for ch in text.chars() {
192 width += if ch == '\t' {
193 tab_size - (width % tab_size)
194 } else {
195 1
196 };
197 }
198
199 width - offset
200}
201
/// Tokenizes a string into runs of text that should stick together and runs of whitespace.
203struct WordBreakingTokenizer<'a> {
204 input: &'a str,
205}
206
207impl<'a> WordBreakingTokenizer<'a> {
208 fn new(input: &'a str) -> Self {
209 Self { input }
210 }
211}
212
213fn is_char_ideographic(ch: char) -> bool {
214 use unicode_script::Script::*;
215 use unicode_script::UnicodeScript;
216 matches!(ch.script(), Han | Tangut | Yi)
217}
218
219fn is_grapheme_ideographic(text: &str) -> bool {
220 text.chars().any(is_char_ideographic)
221}
222
223fn is_grapheme_whitespace(text: &str) -> bool {
224 text.chars().any(|x| x.is_whitespace())
225}
226
227fn should_stay_with_preceding_ideograph(text: &str) -> bool {
228 text.chars().next().map_or(false, |ch| {
229 matches!(ch, '。' | '、' | ',' | '?' | '!' | ':' | ';' | '…')
230 })
231}
232
233#[derive(PartialEq, Eq, Debug, Clone, Copy)]
234enum WordBreakToken<'a> {
235 Word { token: &'a str, grapheme_len: usize },
236 InlineWhitespace { token: &'a str, grapheme_len: usize },
237 Newline,
238}
239
240impl<'a> Iterator for WordBreakingTokenizer<'a> {
    /// Yields each token's text and grapheme count, classified as a word, inline
    /// whitespace, or a newline. Note that it also breaks at word boundaries.
243 type Item = WordBreakToken<'a>;
244
245 fn next(&mut self) -> Option<Self::Item> {
246 use unicode_segmentation::UnicodeSegmentation;
247 if self.input.is_empty() {
248 return None;
249 }
250
251 let mut iter = self.input.graphemes(true).peekable();
252 let mut offset = 0;
253 let mut grapheme_len = 0;
254 if let Some(first_grapheme) = iter.next() {
255 let is_newline = first_grapheme == "\n";
256 let is_whitespace = is_grapheme_whitespace(first_grapheme);
257 offset += first_grapheme.len();
258 grapheme_len += 1;
259 if is_grapheme_ideographic(first_grapheme) && !is_whitespace {
260 if let Some(grapheme) = iter.peek().copied() {
261 if should_stay_with_preceding_ideograph(grapheme) {
262 offset += grapheme.len();
263 grapheme_len += 1;
264 }
265 }
266 } else {
267 let mut words = self.input[offset..].split_word_bound_indices().peekable();
268 let mut next_word_bound = words.peek().copied();
269 if next_word_bound.map_or(false, |(i, _)| i == 0) {
270 next_word_bound = words.next();
271 }
272 while let Some(grapheme) = iter.peek().copied() {
273 if next_word_bound.map_or(false, |(i, _)| i == offset) {
274 break;
275 };
276 if is_grapheme_whitespace(grapheme) != is_whitespace
277 || (grapheme == "\n") != is_newline
278 {
279 break;
280 };
281 offset += grapheme.len();
282 grapheme_len += 1;
283 iter.next();
284 }
285 }
286 let token = &self.input[..offset];
287 self.input = &self.input[offset..];
288 if token == "\n" {
289 Some(WordBreakToken::Newline)
290 } else if is_whitespace {
291 Some(WordBreakToken::InlineWhitespace {
292 token,
293 grapheme_len,
294 })
295 } else {
296 Some(WordBreakToken::Word {
297 token,
298 grapheme_len,
299 })
300 }
301 } else {
302 None
303 }
304 }
305}
306
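/// Word-wraps `unwrapped_text` at `wrap_column`, prefixing every produced line with
/// `line_prefix`. When `preserve_existing_whitespace` is false, runs of whitespace and
/// newlines in the input are collapsed into single spaces.
///
/// # Examples
/// ```rust
/// // Illustrative usage (mirrors the unit tests below):
/// use std::num::NonZeroU32;
/// assert_eq!(
///     util::wrap_with_prefix(
///         "// ".to_string(),
///         "xx \nyy zz aa bb cc".to_string(),
///         12,
///         NonZeroU32::new(4).unwrap(),
///         false,
///     ),
///     "// xx yy zz\n// aa bb cc"
/// );
/// ```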
307pub fn wrap_with_prefix(
308 line_prefix: String,
309 unwrapped_text: String,
310 wrap_column: usize,
311 tab_size: NonZeroU32,
312 preserve_existing_whitespace: bool,
313) -> String {
314 let line_prefix_len = char_len_with_expanded_tabs(0, &line_prefix, tab_size);
315 let mut wrapped_text = String::new();
316 let mut current_line = line_prefix.clone();
317
318 let tokenizer = WordBreakingTokenizer::new(&unwrapped_text);
319 let mut current_line_len = line_prefix_len;
320 let mut in_whitespace = false;
321 for token in tokenizer {
322 let have_preceding_whitespace = in_whitespace;
323 match token {
324 WordBreakToken::Word {
325 token,
326 grapheme_len,
327 } => {
328 in_whitespace = false;
329 if current_line_len + grapheme_len > wrap_column
330 && current_line_len != line_prefix_len
331 {
332 wrapped_text.push_str(current_line.trim_end());
333 wrapped_text.push('\n');
334 current_line.truncate(line_prefix.len());
335 current_line_len = line_prefix_len;
336 }
337 current_line.push_str(token);
338 current_line_len += grapheme_len;
339 }
340 WordBreakToken::InlineWhitespace {
341 mut token,
342 mut grapheme_len,
343 } => {
344 in_whitespace = true;
345 if have_preceding_whitespace && !preserve_existing_whitespace {
346 continue;
347 }
348 if !preserve_existing_whitespace {
349 token = " ";
350 grapheme_len = 1;
351 }
352 if current_line_len + grapheme_len > wrap_column {
353 wrapped_text.push_str(current_line.trim_end());
354 wrapped_text.push('\n');
355 current_line.truncate(line_prefix.len());
356 current_line_len = line_prefix_len;
357 } else if current_line_len != line_prefix_len || preserve_existing_whitespace {
358 current_line.push_str(token);
359 current_line_len += grapheme_len;
360 }
361 }
362 WordBreakToken::Newline => {
363 in_whitespace = true;
364 if preserve_existing_whitespace {
365 wrapped_text.push_str(current_line.trim_end());
366 wrapped_text.push('\n');
367 current_line.truncate(line_prefix.len());
368 current_line_len = line_prefix_len;
369 } else if have_preceding_whitespace {
370 continue;
371 } else if current_line_len + 1 > wrap_column && current_line_len != line_prefix_len
372 {
373 wrapped_text.push_str(current_line.trim_end());
374 wrapped_text.push('\n');
375 current_line.truncate(line_prefix.len());
376 current_line_len = line_prefix_len;
377 } else if current_line_len != line_prefix_len {
378 current_line.push(' ');
379 current_line_len += 1;
380 }
381 }
382 }
383 }
384
385 if !current_line.is_empty() {
        wrapped_text.push_str(&current_line);
387 }
388 wrapped_text
389}
390
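/// Returns the current value and increments it in place, like a postfix `++`.
///
/// # Examples
/// ```rust
/// // Illustrative usage:
/// let mut count = 41;
/// assert_eq!(util::post_inc(&mut count), 41);
/// assert_eq!(count, 42);
/// ```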
391pub fn post_inc<T: From<u8> + AddAssign<T> + Copy>(value: &mut T) -> T {
392 let prev = *value;
393 *value += T::from(1);
394 prev
395}
396
/// Extends a sorted vector with a sorted sequence of items, maintaining the vector's sort
/// order and enforcing a maximum length. Items that compare equal to an existing item are
/// skipped. Before calling this, both `vec` and `new_items` must already be sorted according
/// to the `cmp` comparator.
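///
/// # Examples
/// ```rust
/// // Illustrative usage with an ascending comparator:
/// let mut vec = vec![1, 3, 5];
/// util::extend_sorted(&mut vec, vec![2, 4, 6], 5, |a, b| a.cmp(b));
/// assert_eq!(vec, [1, 2, 3, 4, 5]);
/// ```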
400pub fn extend_sorted<T, I, F>(vec: &mut Vec<T>, new_items: I, limit: usize, mut cmp: F)
401where
402 I: IntoIterator<Item = T>,
403 F: FnMut(&T, &T) -> Ordering,
404{
405 let mut start_index = 0;
406 for new_item in new_items {
407 if let Err(i) = vec[start_index..].binary_search_by(|m| cmp(m, &new_item)) {
408 let index = start_index + i;
409 if vec.len() < limit {
410 vec.insert(index, new_item);
411 } else if index < vec.len() {
412 vec.pop();
413 vec.insert(index, new_item);
414 }
415 start_index = index;
416 }
417 }
418}
419
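/// Sorts `items` by `compare` and keeps only the `limit` smallest elements, using a
/// selection step first so that large inputs do not need to be fully sorted.
///
/// # Examples
/// ```rust
/// // Illustrative usage:
/// let mut items: Vec<u32> = vec![5, 2, 3, 4, 1];
/// util::truncate_to_bottom_n_sorted_by(&mut items, 3, &u32::cmp);
/// assert_eq!(items, [1, 2, 3]);
/// ```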
420pub fn truncate_to_bottom_n_sorted_by<T, F>(items: &mut Vec<T>, limit: usize, compare: &F)
421where
422 F: Fn(&T, &T) -> Ordering,
423{
424 if limit == 0 {
425 items.truncate(0);
426 }
427 if items.len() <= limit {
428 items.sort_by(compare);
429 return;
430 }
431 // When limit is near to items.len() it may be more efficient to sort the whole list and
432 // truncate, rather than always doing selection first as is done below. It's hard to analyze
433 // where the threshold for this should be since the quickselect style algorithm used by
434 // `select_nth_unstable_by` makes the prefix partially sorted, and so its work is not wasted -
435 // the expected number of comparisons needed by `sort_by` is less than it is for some arbitrary
436 // unsorted input.
437 items.select_nth_unstable_by(limit, compare);
438 items.truncate(limit);
439 items.sort_by(compare);
440}
441
442#[cfg(unix)]
443fn load_shell_from_passwd() -> Result<()> {
444 let buflen = match unsafe { libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) } {
445 n if n < 0 => 1024,
446 n => n as usize,
447 };
448 let mut buffer = Vec::with_capacity(buflen);
449
450 let mut pwd: std::mem::MaybeUninit<libc::passwd> = std::mem::MaybeUninit::uninit();
451 let mut result: *mut libc::passwd = std::ptr::null_mut();
452
453 let uid = unsafe { libc::getuid() };
454 let status = unsafe {
455 libc::getpwuid_r(
456 uid,
457 pwd.as_mut_ptr(),
458 buffer.as_mut_ptr() as *mut libc::c_char,
459 buflen,
460 &mut result,
461 )
462 };
463 let entry = unsafe { pwd.assume_init() };
464
465 anyhow::ensure!(
466 status == 0,
467 "call to getpwuid_r failed. uid: {}, status: {}",
468 uid,
469 status
470 );
471 anyhow::ensure!(!result.is_null(), "passwd entry for uid {} not found", uid);
472 anyhow::ensure!(
473 entry.pw_uid == uid,
474 "passwd entry has different uid ({}) than getuid ({}) returned",
475 entry.pw_uid,
476 uid,
477 );
478
479 let shell = unsafe { std::ffi::CStr::from_ptr(entry.pw_shell).to_str().unwrap() };
480 if env::var("SHELL").map_or(true, |shell_env| shell_env != shell) {
481 log::info!(
482 "updating SHELL environment variable to value from passwd entry: {:?}",
483 shell,
484 );
485 unsafe { env::set_var("SHELL", shell) };
486 }
487
488 Ok(())
489}
490
491#[cfg(unix)]
492pub fn load_login_shell_environment() -> Result<()> {
493 load_shell_from_passwd().log_err();
494
495 let marker = "ZED_LOGIN_SHELL_START";
496 let shell = env::var("SHELL").context(
497 "SHELL environment variable is not assigned so we can't source login environment variables",
498 )?;
499
    // If possible, we want to `cd` into the user's `$HOME` so that programs such as
    // direnv, asdf, mise, ... adjust the PATH. These tools often hook into the shell's
    // `cd` command to manipulate the environment. We do this so that we get the
    // environment a user would have when spawning a shell in their home directory.
505 let shell_cmd_prefix = std::env::var_os("HOME")
506 .and_then(|home| home.into_string().ok())
507 .map(|home| format!("cd '{home}';"));
508
509 let shell_cmd = format!(
510 "{}printf '%s' {marker}; /usr/bin/env;",
511 shell_cmd_prefix.as_deref().unwrap_or("")
512 );
513
514 let output = set_pre_exec_to_start_new_session(
515 std::process::Command::new(&shell).args(["-l", "-i", "-c", &shell_cmd]),
516 )
517 .output()
518 .context("failed to spawn login shell to source login environment variables")?;
519 anyhow::ensure!(output.status.success(), "login shell exited with error");
520
521 let stdout = String::from_utf8_lossy(&output.stdout);
522
523 if let Some(env_output_start) = stdout.find(marker) {
524 let env_output = &stdout[env_output_start + marker.len()..];
525
526 parse_env_output(env_output, |key, value| unsafe { env::set_var(key, value) });
527
528 log::info!(
529 "set environment variables from shell:{}, path:{}",
530 shell,
531 env::var("PATH").unwrap_or_default(),
532 );
533 }
534
535 Ok(())
536}
537
538/// Configures the process to start a new session, to prevent interactive shells from taking control
539/// of the terminal.
540///
541/// For more details: https://registerspill.thorstenball.com/p/how-to-lose-control-of-your-shell
542pub fn set_pre_exec_to_start_new_session(
543 command: &mut std::process::Command,
544) -> &mut std::process::Command {
545 // safety: code in pre_exec should be signal safe.
546 // https://man7.org/linux/man-pages/man7/signal-safety.7.html
547 #[cfg(not(target_os = "windows"))]
548 unsafe {
549 use std::os::unix::process::CommandExt;
550 command.pre_exec(|| {
551 libc::setsid();
552 Ok(())
553 });
554 };
555 command
556}
557
/// Parses the output of calling `/usr/bin/env` with no arguments.
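///
/// # Examples
/// ```rust
/// // Illustrative usage; a value continues across lines until the next `KEY=` line:
/// let mut vars = Vec::new();
/// util::parse_env_output("FOO=bar\nBAZ=multi\nline", |key, value| vars.push((key, value)));
/// assert_eq!(vars, [
///     ("FOO".to_string(), "bar".to_string()),
///     ("BAZ".to_string(), "multi\nline".to_string()),
/// ]);
/// ```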
559pub fn parse_env_output(env: &str, mut f: impl FnMut(String, String)) {
560 let mut current_key: Option<String> = None;
561 let mut current_value: Option<String> = None;
562
563 for line in env.split_terminator('\n') {
564 if let Some(separator_index) = line.find('=') {
565 if !line[..separator_index].is_empty() {
566 if let Some((key, value)) = Option::zip(current_key.take(), current_value.take()) {
567 f(key, value)
568 }
569 current_key = Some(line[..separator_index].to_string());
570 current_value = Some(line[separator_index + 1..].to_string());
571 continue;
572 };
573 }
574 if let Some(value) = current_value.as_mut() {
575 value.push('\n');
576 value.push_str(line);
577 }
578 }
579 if let Some((key, value)) = Option::zip(current_key.take(), current_value.take()) {
580 f(key, value)
581 }
582}
583
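/// Recursively merges `source` into `target` for `serde_json_lenient` values: objects are
/// merged key by key, arrays are appended to, and any other value in `source` replaces the
/// corresponding `target` value.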
584pub fn merge_json_lenient_value_into(
585 source: serde_json_lenient::Value,
586 target: &mut serde_json_lenient::Value,
587) {
588 match (source, target) {
589 (serde_json_lenient::Value::Object(source), serde_json_lenient::Value::Object(target)) => {
590 for (key, value) in source {
591 if let Some(target) = target.get_mut(&key) {
592 merge_json_lenient_value_into(value, target);
593 } else {
594 target.insert(key, value);
595 }
596 }
597 }
598
599 (serde_json_lenient::Value::Array(source), serde_json_lenient::Value::Array(target)) => {
600 for value in source {
601 target.push(value);
602 }
603 }
604
605 (source, target) => *target = source,
606 }
607}
608
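/// Recursively merges `source` into `target`: objects are merged key by key, arrays are
/// appended to, and any other value in `source` replaces the corresponding `target` value.
///
/// # Examples
/// ```rust
/// // Illustrative usage (assumes the crate's `serde_json` dependency):
/// let mut target = serde_json::json!({"a": 1, "b": {"c": 2}});
/// util::merge_json_value_into(serde_json::json!({"b": {"d": 3}}), &mut target);
/// assert_eq!(target, serde_json::json!({"a": 1, "b": {"c": 2, "d": 3}}));
/// ```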
609pub fn merge_json_value_into(source: serde_json::Value, target: &mut serde_json::Value) {
610 use serde_json::Value;
611
612 match (source, target) {
613 (Value::Object(source), Value::Object(target)) => {
614 for (key, value) in source {
615 if let Some(target) = target.get_mut(&key) {
616 merge_json_value_into(value, target);
617 } else {
618 target.insert(key, value);
619 }
620 }
621 }
622
623 (Value::Array(source), Value::Array(target)) => {
624 for value in source {
625 target.push(value);
626 }
627 }
628
629 (source, target) => *target = source,
630 }
631}
632
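/// Recursively merges JSON objects from `source` into `target`, skipping `null` values in
/// `source`; non-object, non-null values replace the corresponding `target` value.
///
/// # Examples
/// ```rust
/// // Illustrative usage (assumes the crate's `serde_json` dependency):
/// let mut target = serde_json::json!({"a": 1});
/// util::merge_non_null_json_value_into(serde_json::json!({"a": null, "b": 2}), &mut target);
/// assert_eq!(target, serde_json::json!({"a": 1, "b": 2}));
/// ```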
633pub fn merge_non_null_json_value_into(source: serde_json::Value, target: &mut serde_json::Value) {
634 use serde_json::Value;
635 if let Value::Object(source_object) = source {
636 let target_object = if let Value::Object(target) = target {
637 target
638 } else {
639 *target = Value::Object(Default::default());
640 target.as_object_mut().unwrap()
641 };
642 for (key, value) in source_object {
643 if let Some(target) = target_object.get_mut(&key) {
644 merge_non_null_json_value_into(value, target);
645 } else if !value.is_null() {
646 target_object.insert(key, value);
647 }
648 }
649 } else if !source.is_null() {
650 *target = source
651 }
652}
653
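/// Runs `f` and returns its result, printing the label and elapsed time to stderr when the
/// `ZED_MEASUREMENTS` environment variable is set to "1" or "true".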
654pub fn measure<R>(label: &str, f: impl FnOnce() -> R) -> R {
655 static ZED_MEASUREMENTS: OnceLock<bool> = OnceLock::new();
656 let zed_measurements = ZED_MEASUREMENTS.get_or_init(|| {
657 env::var("ZED_MEASUREMENTS")
658 .map(|measurements| measurements == "1" || measurements == "true")
659 .unwrap_or(false)
660 });
661
662 if *zed_measurements {
663 let start = Instant::now();
664 let result = f();
665 let elapsed = start.elapsed();
666 eprintln!("{}: {:?}", label, elapsed);
667 result
668 } else {
669 f()
670 }
671}
672
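/// Expands `range` by `additional_before` indices before its start and `additional_after`
/// indices after its end, wrapping around within `0..wrap_length` when the expansion
/// crosses either boundary.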
673pub fn iterate_expanded_and_wrapped_usize_range(
674 range: Range<usize>,
675 additional_before: usize,
676 additional_after: usize,
677 wrap_length: usize,
678) -> impl Iterator<Item = usize> {
679 let start_wraps = range.start < additional_before;
680 let end_wraps = wrap_length < range.end + additional_after;
681 if start_wraps && end_wraps {
682 Either::Left(0..wrap_length)
683 } else if start_wraps {
684 let wrapped_start = (range.start + wrap_length).saturating_sub(additional_before);
685 if wrapped_start <= range.end {
686 Either::Left(0..wrap_length)
687 } else {
688 Either::Right((0..range.end + additional_after).chain(wrapped_start..wrap_length))
689 }
690 } else if end_wraps {
691 let wrapped_end = range.end + additional_after - wrap_length;
692 if range.start <= wrapped_end {
693 Either::Left(0..wrap_length)
694 } else {
695 Either::Right((0..wrapped_end).chain(range.start - additional_before..wrap_length))
696 }
697 } else {
698 Either::Left((range.start - additional_before)..(range.end + additional_after))
699 }
700}
701
702#[cfg(target_os = "windows")]
703pub fn get_windows_system_shell() -> String {
704 use std::path::PathBuf;
705
706 fn find_pwsh_in_programfiles(find_alternate: bool, find_preview: bool) -> Option<PathBuf> {
707 #[cfg(target_pointer_width = "64")]
708 let env_var = if find_alternate {
709 "ProgramFiles(x86)"
710 } else {
711 "ProgramFiles"
712 };
713
714 #[cfg(target_pointer_width = "32")]
715 let env_var = if find_alternate {
716 "ProgramW6432"
717 } else {
718 "ProgramFiles"
719 };
720
721 let install_base_dir = PathBuf::from(std::env::var_os(env_var)?).join("PowerShell");
722 install_base_dir
723 .read_dir()
724 .ok()?
725 .filter_map(Result::ok)
726 .filter(|entry| matches!(entry.file_type(), Ok(ft) if ft.is_dir()))
727 .filter_map(|entry| {
728 let dir_name = entry.file_name();
729 let dir_name = dir_name.to_string_lossy();
730
731 let version = if find_preview {
732 let dash_index = dir_name.find('-')?;
733 if &dir_name[dash_index + 1..] != "preview" {
734 return None;
735 };
736 dir_name[..dash_index].parse::<u32>().ok()?
737 } else {
738 dir_name.parse::<u32>().ok()?
739 };
740
741 let exe_path = entry.path().join("pwsh.exe");
742 if exe_path.exists() {
743 Some((version, exe_path))
744 } else {
745 None
746 }
747 })
748 .max_by_key(|(version, _)| *version)
749 .map(|(_, path)| path)
750 }
751
752 fn find_pwsh_in_msix(find_preview: bool) -> Option<PathBuf> {
753 let msix_app_dir =
754 PathBuf::from(std::env::var_os("LOCALAPPDATA")?).join("Microsoft\\WindowsApps");
755 if !msix_app_dir.exists() {
756 return None;
757 }
758
759 let prefix = if find_preview {
760 "Microsoft.PowerShellPreview_"
761 } else {
762 "Microsoft.PowerShell_"
763 };
764 msix_app_dir
765 .read_dir()
766 .ok()?
767 .filter_map(|entry| {
768 let entry = entry.ok()?;
769 if !matches!(entry.file_type(), Ok(ft) if ft.is_dir()) {
770 return None;
771 }
772
773 if !entry.file_name().to_string_lossy().starts_with(prefix) {
774 return None;
775 }
776
777 let exe_path = entry.path().join("pwsh.exe");
778 exe_path.exists().then_some(exe_path)
779 })
780 .next()
781 }
782
783 fn find_pwsh_in_scoop() -> Option<PathBuf> {
784 let pwsh_exe =
785 PathBuf::from(std::env::var_os("USERPROFILE")?).join("scoop\\shims\\pwsh.exe");
786 pwsh_exe.exists().then_some(pwsh_exe)
787 }
788
789 static SYSTEM_SHELL: LazyLock<String> = LazyLock::new(|| {
790 find_pwsh_in_programfiles(false, false)
791 .or_else(|| find_pwsh_in_programfiles(true, false))
792 .or_else(|| find_pwsh_in_msix(false))
793 .or_else(|| find_pwsh_in_programfiles(false, true))
794 .or_else(|| find_pwsh_in_msix(true))
795 .or_else(|| find_pwsh_in_programfiles(true, true))
796 .or_else(find_pwsh_in_scoop)
797 .map(|p| p.to_string_lossy().to_string())
798 .unwrap_or("powershell.exe".to_string())
799 });
800
801 (*SYSTEM_SHELL).clone()
802}
803
804pub trait ResultExt<E> {
805 type Ok;
806
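    /// Logs the error (if any) at error level and converts the result into an `Option`.
    ///
    /// # Examples
    /// ```rust
    /// // Illustrative usage:
    /// use util::ResultExt as _;
    /// assert_eq!("42".parse::<i32>().log_err(), Some(42));
    /// assert_eq!("oops".parse::<i32>().log_err(), None);
    /// ```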
807 fn log_err(self) -> Option<Self::Ok>;
808 /// Assert that this result should never be an error in development or tests.
809 fn debug_assert_ok(self, reason: &str) -> Self;
810 fn warn_on_err(self) -> Option<Self::Ok>;
811 fn log_with_level(self, level: log::Level) -> Option<Self::Ok>;
812 fn anyhow(self) -> anyhow::Result<Self::Ok>
813 where
814 E: Into<anyhow::Error>;
815}
816
817impl<T, E> ResultExt<E> for Result<T, E>
818where
819 E: std::fmt::Debug,
820{
821 type Ok = T;
822
823 #[track_caller]
824 fn log_err(self) -> Option<T> {
825 self.log_with_level(log::Level::Error)
826 }
827
828 #[track_caller]
829 fn debug_assert_ok(self, reason: &str) -> Self {
830 if let Err(error) = &self {
831 debug_panic!("{reason} - {error:?}");
832 }
833 self
834 }
835
836 #[track_caller]
837 fn warn_on_err(self) -> Option<T> {
838 self.log_with_level(log::Level::Warn)
839 }
840
841 #[track_caller]
842 fn log_with_level(self, level: log::Level) -> Option<T> {
843 match self {
844 Ok(value) => Some(value),
845 Err(error) => {
846 log_error_with_caller(*Location::caller(), error, level);
847 None
848 }
849 }
850 }
851
852 fn anyhow(self) -> anyhow::Result<T>
853 where
854 E: Into<anyhow::Error>,
855 {
856 self.map_err(Into::into)
857 }
858}
859
860fn log_error_with_caller<E>(caller: core::panic::Location<'_>, error: E, level: log::Level)
861where
862 E: std::fmt::Debug,
863{
864 #[cfg(not(target_os = "windows"))]
865 let file = caller.file();
866 #[cfg(target_os = "windows")]
867 let file = caller.file().replace('\\', "/");
868 // In this codebase, the first segment of the file path is
869 // the 'crates' folder, followed by the crate name.
870 let target = file.split('/').nth(1);
871
872 log::logger().log(
873 &log::Record::builder()
874 .target(target.unwrap_or(""))
875 .module_path(target)
876 .args(format_args!("{:?}", error))
877 .file(Some(caller.file()))
878 .line(Some(caller.line()))
879 .level(level)
880 .build(),
881 );
882}
883
884pub fn log_err<E: std::fmt::Debug>(error: &E) {
885 log_error_with_caller(*Location::caller(), error, log::Level::Warn);
886}
887
888pub trait TryFutureExt {
889 fn log_err(self) -> LogErrorFuture<Self>
890 where
891 Self: Sized;
892
893 fn log_tracked_err(self, location: core::panic::Location<'static>) -> LogErrorFuture<Self>
894 where
895 Self: Sized;
896
897 fn warn_on_err(self) -> LogErrorFuture<Self>
898 where
899 Self: Sized;
900 fn unwrap(self) -> UnwrapFuture<Self>
901 where
902 Self: Sized;
903}
904
905impl<F, T, E> TryFutureExt for F
906where
907 F: Future<Output = Result<T, E>>,
908 E: std::fmt::Debug,
909{
910 #[track_caller]
911 fn log_err(self) -> LogErrorFuture<Self>
912 where
913 Self: Sized,
914 {
915 let location = Location::caller();
916 LogErrorFuture(self, log::Level::Error, *location)
917 }
918
919 fn log_tracked_err(self, location: core::panic::Location<'static>) -> LogErrorFuture<Self>
920 where
921 Self: Sized,
922 {
923 LogErrorFuture(self, log::Level::Error, location)
924 }
925
926 #[track_caller]
927 fn warn_on_err(self) -> LogErrorFuture<Self>
928 where
929 Self: Sized,
930 {
931 let location = Location::caller();
932 LogErrorFuture(self, log::Level::Warn, *location)
933 }
934
935 fn unwrap(self) -> UnwrapFuture<Self>
936 where
937 Self: Sized,
938 {
939 UnwrapFuture(self)
940 }
941}
942
943#[must_use]
944pub struct LogErrorFuture<F>(F, log::Level, core::panic::Location<'static>);
945
946impl<F, T, E> Future for LogErrorFuture<F>
947where
948 F: Future<Output = Result<T, E>>,
949 E: std::fmt::Debug,
950{
951 type Output = Option<T>;
952
953 fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
954 let level = self.1;
955 let location = self.2;
956 let inner = unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().0) };
957 match inner.poll(cx) {
958 Poll::Ready(output) => Poll::Ready(match output {
959 Ok(output) => Some(output),
960 Err(error) => {
961 log_error_with_caller(location, error, level);
962 None
963 }
964 }),
965 Poll::Pending => Poll::Pending,
966 }
967 }
968}
969
970pub struct UnwrapFuture<F>(F);
971
972impl<F, T, E> Future for UnwrapFuture<F>
973where
974 F: Future<Output = Result<T, E>>,
975 E: std::fmt::Debug,
976{
977 type Output = T;
978
979 fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
980 let inner = unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().0) };
981 match inner.poll(cx) {
982 Poll::Ready(result) => Poll::Ready(result.unwrap()),
983 Poll::Pending => Poll::Pending,
984 }
985 }
986}
987
988pub struct Deferred<F: FnOnce()>(Option<F>);
989
990impl<F: FnOnce()> Deferred<F> {
991 /// Drop without running the deferred function.
992 pub fn abort(mut self) {
993 self.0.take();
994 }
995}
996
997impl<F: FnOnce()> Drop for Deferred<F> {
998 fn drop(&mut self) {
999 if let Some(f) = self.0.take() {
1000 f()
1001 }
1002 }
1003}
1004
1005/// Run the given function when the returned value is dropped (unless it's cancelled).
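///
/// # Examples
/// ```rust
/// // Illustrative usage:
/// use std::cell::Cell;
/// let called = Cell::new(false);
/// {
///     let _guard = util::defer(|| called.set(true));
///     assert!(!called.get());
/// }
/// assert!(called.get());
/// ```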
1006#[must_use]
1007pub fn defer<F: FnOnce()>(f: F) -> Deferred<F> {
1008 Deferred(Some(f))
1009}
1010
1011#[cfg(any(test, feature = "test-support"))]
1012mod rng {
1013 use rand::{Rng, seq::SliceRandom};
1014 pub struct RandomCharIter<T: Rng> {
1015 rng: T,
1016 simple_text: bool,
1017 }
1018
1019 impl<T: Rng> RandomCharIter<T> {
1020 pub fn new(rng: T) -> Self {
1021 Self {
1022 rng,
1023 simple_text: std::env::var("SIMPLE_TEXT").map_or(false, |v| !v.is_empty()),
1024 }
1025 }
1026
1027 pub fn with_simple_text(mut self) -> Self {
1028 self.simple_text = true;
1029 self
1030 }
1031 }
1032
1033 impl<T: Rng> Iterator for RandomCharIter<T> {
1034 type Item = char;
1035
1036 fn next(&mut self) -> Option<Self::Item> {
1037 if self.simple_text {
1038 return if self.rng.gen_range(0..100) < 5 {
1039 Some('\n')
1040 } else {
1041 Some(self.rng.gen_range(b'a'..b'z' + 1).into())
1042 };
1043 }
1044
1045 match self.rng.gen_range(0..100) {
1046 // whitespace
1047 0..=19 => [' ', '\n', '\r', '\t'].choose(&mut self.rng).copied(),
1048 // two-byte greek letters
1049 20..=32 => char::from_u32(self.rng.gen_range(('α' as u32)..('ω' as u32 + 1))),
                // three-byte characters
1051 33..=45 => ['✋', '✅', '❌', '❎', '⭐']
1052 .choose(&mut self.rng)
1053 .copied(),
                // four-byte characters
1055 46..=58 => ['🍐', '🏀', '🍗', '🎉'].choose(&mut self.rng).copied(),
1056 // ascii letters
1057 _ => Some(self.rng.gen_range(b'a'..b'z' + 1).into()),
1058 }
1059 }
1060 }
1061}
1062#[cfg(any(test, feature = "test-support"))]
1063pub use rng::RandomCharIter;
1064/// Get an embedded file as a string.
1065pub fn asset_str<A: rust_embed::RustEmbed>(path: &str) -> Cow<'static, str> {
1066 match A::get(path).expect(path).data {
1067 Cow::Borrowed(bytes) => Cow::Borrowed(std::str::from_utf8(bytes).unwrap()),
1068 Cow::Owned(bytes) => Cow::Owned(String::from_utf8(bytes).unwrap()),
1069 }
1070}
1071
1072/// Expands to an immediately-invoked function expression. Good for using the ? operator
1073/// in functions which do not return an Option or Result.
1074///
1075/// Accepts a normal block, an async block, or an async move block.
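///
/// # Examples
/// ```rust
/// // Illustrative usage:
/// let value: Option<i32> = util::maybe!({
///     let parsed: i32 = "21".parse().ok()?;
///     Some(parsed * 2)
/// });
/// assert_eq!(value, Some(42));
/// ```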
1076#[macro_export]
1077macro_rules! maybe {
1078 ($block:block) => {
1079 (|| $block)()
1080 };
1081 (async $block:block) => {
1082 (|| async $block)()
1083 };
1084 (async move $block:block) => {
1085 (|| async move $block)()
1086 };
1087}
1088
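/// Extension methods for `Range` and `RangeInclusive`.
///
/// # Examples
/// ```rust
/// // Illustrative usage:
/// use util::RangeExt;
/// assert_eq!((5..1).sorted(), 1..5);
/// assert!((1..5).overlaps(&(4..6)));
/// assert!(!(1..4).overlaps(&(4..6)));
/// assert!((1..10).contains_inclusive(&(2..10)));
/// ```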
1089pub trait RangeExt<T> {
1090 fn sorted(&self) -> Self;
1091 fn to_inclusive(&self) -> RangeInclusive<T>;
1092 fn overlaps(&self, other: &Range<T>) -> bool;
1093 fn contains_inclusive(&self, other: &Range<T>) -> bool;
1094}
1095
1096impl<T: Ord + Clone> RangeExt<T> for Range<T> {
1097 fn sorted(&self) -> Self {
1098 cmp::min(&self.start, &self.end).clone()..cmp::max(&self.start, &self.end).clone()
1099 }
1100
1101 fn to_inclusive(&self) -> RangeInclusive<T> {
1102 self.start.clone()..=self.end.clone()
1103 }
1104
1105 fn overlaps(&self, other: &Range<T>) -> bool {
1106 self.start < other.end && other.start < self.end
1107 }
1108
1109 fn contains_inclusive(&self, other: &Range<T>) -> bool {
1110 self.start <= other.start && other.end <= self.end
1111 }
1112}
1113
1114impl<T: Ord + Clone> RangeExt<T> for RangeInclusive<T> {
1115 fn sorted(&self) -> Self {
1116 cmp::min(self.start(), self.end()).clone()..=cmp::max(self.start(), self.end()).clone()
1117 }
1118
1119 fn to_inclusive(&self) -> RangeInclusive<T> {
1120 self.clone()
1121 }
1122
1123 fn overlaps(&self, other: &Range<T>) -> bool {
1124 self.start() < &other.end && &other.start <= self.end()
1125 }
1126
1127 fn contains_inclusive(&self, other: &Range<T>) -> bool {
1128 self.start() <= &other.start && &other.end <= self.end()
1129 }
1130}
1131
/// A sort key that compares strings with a leading numeric prefix numerically first, falling
/// back to case-insensitive alphanumeric comparison for the remainder.
///
/// This is useful for turning alphanumerically sorted sequences such as
/// `1-abc, 10, 11-def, .., 2, 21-abc` into `1-abc, 2, 10, 11-def, .., 21-abc`.
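///
/// # Examples
/// ```rust
/// // Illustrative usage:
/// use util::NumericPrefixWithSuffix;
/// let mut names = vec!["11-def", "2", "1-abc"];
/// names.sort_by_key(|name| NumericPrefixWithSuffix::from_numeric_prefixed_str(name));
/// assert_eq!(names, ["1-abc", "2", "11-def"]);
/// ```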
1137#[derive(Debug, PartialEq, Eq)]
1138pub struct NumericPrefixWithSuffix<'a>(Option<u64>, &'a str);
1139
1140impl<'a> NumericPrefixWithSuffix<'a> {
1141 pub fn from_numeric_prefixed_str(str: &'a str) -> Self {
1142 let i = str.chars().take_while(|c| c.is_ascii_digit()).count();
1143 let (prefix, remainder) = str.split_at(i);
1144
1145 let prefix = prefix.parse().ok();
1146 Self(prefix, remainder)
1147 }
1148}
1149
/// When the case-insensitive comparison ties, the strings' case is used as a tiebreaker so
/// that, for example, "a" < "A" rather than "a" == "A".
1152impl Ord for NumericPrefixWithSuffix<'_> {
1153 fn cmp(&self, other: &Self) -> Ordering {
1154 match (self.0, other.0) {
1155 (None, None) => UniCase::new(self.1)
1156 .cmp(&UniCase::new(other.1))
1157 .then_with(|| self.1.cmp(other.1).reverse()),
1158 (None, Some(_)) => Ordering::Greater,
1159 (Some(_), None) => Ordering::Less,
1160 (Some(a), Some(b)) => a.cmp(&b).then_with(|| {
1161 UniCase::new(self.1)
1162 .cmp(&UniCase::new(other.1))
1163 .then_with(|| self.1.cmp(other.1).reverse())
1164 }),
1165 }
1166 }
1167}
1168
1169impl PartialOrd for NumericPrefixWithSuffix<'_> {
1170 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1171 Some(self.cmp(other))
1172 }
1173}
1174
1175/// Capitalizes the first character of a string.
1176///
1177/// This function takes a string slice as input and returns a new `String` with the first character
1178/// capitalized.
1179///
1180/// # Examples
1181///
1182/// ```
1183/// use util::capitalize;
1184///
1185/// assert_eq!(capitalize("hello"), "Hello");
1186/// assert_eq!(capitalize("WORLD"), "WORLD");
1187/// assert_eq!(capitalize(""), "");
1188/// ```
1189pub fn capitalize(str: &str) -> String {
1190 let mut chars = str.chars();
1191 match chars.next() {
1192 None => String::new(),
1193 Some(first_char) => first_char.to_uppercase().collect::<String>() + chars.as_str(),
1194 }
1195}
1196
1197fn emoji_regex() -> &'static Regex {
1198 static EMOJI_REGEX: LazyLock<Regex> =
1199 LazyLock::new(|| Regex::new("(\\p{Emoji}|\u{200D})").unwrap());
1200 &EMOJI_REGEX
1201}
1202
1203/// Returns true if the given string consists of emojis only.
1204/// E.g. "👨👩👧👧👋" will return true, but "👋!" will return false.
1205pub fn word_consists_of_emojis(s: &str) -> bool {
1206 let mut prev_end = 0;
1207 for capture in emoji_regex().find_iter(s) {
1208 if capture.start() != prev_end {
1209 return false;
1210 }
1211 prev_end = capture.end();
1212 }
1213 prev_end == s.len()
1214}
1215
1216/// Similar to `str::split`, but also provides byte-offset ranges of the results. Unlike
1217/// `str::split`, this is not generic on pattern types and does not return an `Iterator`.
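///
/// # Examples
/// ```rust
/// // Illustrative usage (ranges are byte offsets):
/// let parts = util::split_str_with_ranges("a b", |c| c == ' ');
/// assert_eq!(parts, [(0..1, "a"), (2..3, "b")]);
/// ```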
1218pub fn split_str_with_ranges(s: &str, pat: impl Fn(char) -> bool) -> Vec<(Range<usize>, &str)> {
1219 let mut result = Vec::new();
1220 let mut start = 0;
1221
1222 for (i, ch) in s.char_indices() {
1223 if pat(ch) {
1224 if i > start {
1225 result.push((start..i, &s[start..i]));
1226 }
1227 start = i + ch.len_utf8();
1228 }
1229 }
1230
1231 if s.len() > start {
1232 result.push((start..s.len(), &s[start..s.len()]));
1233 }
1234
1235 result
1236}
1237
1238pub fn default<D: Default>() -> D {
1239 Default::default()
1240}
1241
1242pub fn get_system_shell() -> String {
1243 #[cfg(target_os = "windows")]
1244 {
1245 get_windows_system_shell()
1246 }
1247
1248 #[cfg(not(target_os = "windows"))]
1249 {
1250 std::env::var("SHELL").unwrap_or("/bin/sh".to_string())
1251 }
1252}
1253
1254#[derive(Debug)]
1255pub enum ConnectionResult<O> {
1256 Timeout,
1257 ConnectionReset,
1258 Result(anyhow::Result<O>),
1259}
1260
1261impl<O> ConnectionResult<O> {
1262 pub fn into_response(self) -> anyhow::Result<O> {
1263 match self {
1264 ConnectionResult::Timeout => anyhow::bail!("Request timed out"),
1265 ConnectionResult::ConnectionReset => anyhow::bail!("Server reset the connection"),
1266 ConnectionResult::Result(r) => r,
1267 }
1268 }
1269}
1270
1271impl<O> From<anyhow::Result<O>> for ConnectionResult<O> {
1272 fn from(result: anyhow::Result<O>) -> Self {
1273 ConnectionResult::Result(result)
1274 }
1275}
1276
1277#[cfg(test)]
1278mod tests {
1279 use super::*;
1280
1281 #[test]
1282 fn test_extend_sorted() {
1283 let mut vec = vec![];
1284
1285 extend_sorted(&mut vec, vec![21, 17, 13, 8, 1, 0], 5, |a, b| b.cmp(a));
1286 assert_eq!(vec, &[21, 17, 13, 8, 1]);
1287
1288 extend_sorted(&mut vec, vec![101, 19, 17, 8, 2], 8, |a, b| b.cmp(a));
1289 assert_eq!(vec, &[101, 21, 19, 17, 13, 8, 2, 1]);
1290
1291 extend_sorted(&mut vec, vec![1000, 19, 17, 9, 5], 8, |a, b| b.cmp(a));
1292 assert_eq!(vec, &[1000, 101, 21, 19, 17, 13, 9, 8]);
1293 }
1294
1295 #[test]
1296 fn test_truncate_to_bottom_n_sorted_by() {
1297 let mut vec: Vec<u32> = vec![5, 2, 3, 4, 1];
1298 truncate_to_bottom_n_sorted_by(&mut vec, 10, &u32::cmp);
1299 assert_eq!(vec, &[1, 2, 3, 4, 5]);
1300
1301 vec = vec![5, 2, 3, 4, 1];
1302 truncate_to_bottom_n_sorted_by(&mut vec, 5, &u32::cmp);
1303 assert_eq!(vec, &[1, 2, 3, 4, 5]);
1304
1305 vec = vec![5, 2, 3, 4, 1];
1306 truncate_to_bottom_n_sorted_by(&mut vec, 4, &u32::cmp);
1307 assert_eq!(vec, &[1, 2, 3, 4]);
1308
1309 vec = vec![5, 2, 3, 4, 1];
1310 truncate_to_bottom_n_sorted_by(&mut vec, 1, &u32::cmp);
1311 assert_eq!(vec, &[1]);
1312
1313 vec = vec![5, 2, 3, 4, 1];
1314 truncate_to_bottom_n_sorted_by(&mut vec, 0, &u32::cmp);
1315 assert!(vec.is_empty());
1316 }
1317
1318 #[test]
1319 fn test_iife() {
1320 fn option_returning_function() -> Option<()> {
1321 None
1322 }
1323
1324 let foo = maybe!({
1325 option_returning_function()?;
1326 Some(())
1327 });
1328
1329 assert_eq!(foo, None);
1330 }
1331
1332 #[test]
1333 fn test_truncate_and_trailoff() {
1334 assert_eq!(truncate_and_trailoff("", 5), "");
1335 assert_eq!(truncate_and_trailoff("aaaaaa", 7), "aaaaaa");
1336 assert_eq!(truncate_and_trailoff("aaaaaa", 6), "aaaaaa");
1337 assert_eq!(truncate_and_trailoff("aaaaaa", 5), "aaaaa…");
1338 assert_eq!(truncate_and_trailoff("èèèèèè", 7), "èèèèèè");
1339 assert_eq!(truncate_and_trailoff("èèèèèè", 6), "èèèèèè");
1340 assert_eq!(truncate_and_trailoff("èèèèèè", 5), "èèèèè…");
1341 }
1342
1343 #[test]
1344 fn test_truncate_and_remove_front() {
1345 assert_eq!(truncate_and_remove_front("", 5), "");
1346 assert_eq!(truncate_and_remove_front("aaaaaa", 7), "aaaaaa");
1347 assert_eq!(truncate_and_remove_front("aaaaaa", 6), "aaaaaa");
1348 assert_eq!(truncate_and_remove_front("aaaaaa", 5), "…aaaaa");
1349 assert_eq!(truncate_and_remove_front("èèèèèè", 7), "èèèèèè");
1350 assert_eq!(truncate_and_remove_front("èèèèèè", 6), "èèèèèè");
1351 assert_eq!(truncate_and_remove_front("èèèèèè", 5), "…èèèèè");
1352 }
1353
1354 #[test]
1355 fn test_numeric_prefix_str_method() {
1356 let target = "1a";
1357 assert_eq!(
1358 NumericPrefixWithSuffix::from_numeric_prefixed_str(target),
1359 NumericPrefixWithSuffix(Some(1), "a")
1360 );
1361
1362 let target = "12ab";
1363 assert_eq!(
1364 NumericPrefixWithSuffix::from_numeric_prefixed_str(target),
1365 NumericPrefixWithSuffix(Some(12), "ab")
1366 );
1367
1368 let target = "12_ab";
1369 assert_eq!(
1370 NumericPrefixWithSuffix::from_numeric_prefixed_str(target),
1371 NumericPrefixWithSuffix(Some(12), "_ab")
1372 );
1373
1374 let target = "1_2ab";
1375 assert_eq!(
1376 NumericPrefixWithSuffix::from_numeric_prefixed_str(target),
1377 NumericPrefixWithSuffix(Some(1), "_2ab")
1378 );
1379
1380 let target = "1.2";
1381 assert_eq!(
1382 NumericPrefixWithSuffix::from_numeric_prefixed_str(target),
1383 NumericPrefixWithSuffix(Some(1), ".2")
1384 );
1385
1386 let target = "1.2_a";
1387 assert_eq!(
1388 NumericPrefixWithSuffix::from_numeric_prefixed_str(target),
1389 NumericPrefixWithSuffix(Some(1), ".2_a")
1390 );
1391
1392 let target = "12.2_a";
1393 assert_eq!(
1394 NumericPrefixWithSuffix::from_numeric_prefixed_str(target),
1395 NumericPrefixWithSuffix(Some(12), ".2_a")
1396 );
1397
1398 let target = "12a.2_a";
1399 assert_eq!(
1400 NumericPrefixWithSuffix::from_numeric_prefixed_str(target),
1401 NumericPrefixWithSuffix(Some(12), "a.2_a")
1402 );
1403 }
1404
1405 #[test]
1406 fn test_numeric_prefix_with_suffix() {
1407 let mut sorted = vec!["1-abc", "10", "11def", "2", "21-abc"];
1408 sorted.sort_by_key(|s| NumericPrefixWithSuffix::from_numeric_prefixed_str(s));
1409 assert_eq!(sorted, ["1-abc", "2", "10", "11def", "21-abc"]);
1410
1411 for numeric_prefix_less in ["numeric_prefix_less", "aaa", "~™£"] {
1412 assert_eq!(
1413 NumericPrefixWithSuffix::from_numeric_prefixed_str(numeric_prefix_less),
1414 NumericPrefixWithSuffix(None, numeric_prefix_less),
1415 "String without numeric prefix `{numeric_prefix_less}` should not be converted into NumericPrefixWithSuffix"
1416 )
1417 }
1418 }
1419
1420 #[test]
1421 fn test_word_consists_of_emojis() {
1422 let words_to_test = vec![
1423 ("👨👩👧👧👋🥒", true),
1424 ("👋", true),
1425 ("!👋", false),
1426 ("👋!", false),
1427 ("👋 ", false),
1428 (" 👋", false),
1429 ("Test", false),
1430 ];
1431
1432 for (text, expected_result) in words_to_test {
1433 assert_eq!(word_consists_of_emojis(text), expected_result);
1434 }
1435 }
1436
1437 #[test]
1438 fn test_truncate_lines_and_trailoff() {
1439 let text = r#"Line 1
1440Line 2
1441Line 3"#;
1442
1443 assert_eq!(
1444 truncate_lines_and_trailoff(text, 2),
1445 r#"Line 1
1446…"#
1447 );
1448
1449 assert_eq!(
1450 truncate_lines_and_trailoff(text, 3),
1451 r#"Line 1
1452Line 2
1453…"#
1454 );
1455
1456 assert_eq!(
1457 truncate_lines_and_trailoff(text, 4),
1458 r#"Line 1
1459Line 2
1460Line 3"#
1461 );
1462 }
1463
1464 #[test]
1465 fn test_iterate_expanded_and_wrapped_usize_range() {
1466 // Neither wrap
1467 assert_eq!(
1468 iterate_expanded_and_wrapped_usize_range(2..4, 1, 1, 8).collect::<Vec<usize>>(),
1469 (1..5).collect::<Vec<usize>>()
1470 );
1471 // Start wraps
1472 assert_eq!(
1473 iterate_expanded_and_wrapped_usize_range(2..4, 3, 1, 8).collect::<Vec<usize>>(),
1474 ((0..5).chain(7..8)).collect::<Vec<usize>>()
1475 );
1476 // Start wraps all the way around
1477 assert_eq!(
1478 iterate_expanded_and_wrapped_usize_range(2..4, 5, 1, 8).collect::<Vec<usize>>(),
1479 (0..8).collect::<Vec<usize>>()
1480 );
1481 // Start wraps all the way around and past 0
1482 assert_eq!(
1483 iterate_expanded_and_wrapped_usize_range(2..4, 10, 1, 8).collect::<Vec<usize>>(),
1484 (0..8).collect::<Vec<usize>>()
1485 );
1486 // End wraps
1487 assert_eq!(
1488 iterate_expanded_and_wrapped_usize_range(3..5, 1, 4, 8).collect::<Vec<usize>>(),
1489 (0..1).chain(2..8).collect::<Vec<usize>>()
1490 );
1491 // End wraps all the way around
1492 assert_eq!(
1493 iterate_expanded_and_wrapped_usize_range(3..5, 1, 5, 8).collect::<Vec<usize>>(),
1494 (0..8).collect::<Vec<usize>>()
1495 );
1496 // End wraps all the way around and past the end
1497 assert_eq!(
1498 iterate_expanded_and_wrapped_usize_range(3..5, 1, 10, 8).collect::<Vec<usize>>(),
1499 (0..8).collect::<Vec<usize>>()
1500 );
1501 // Both start and end wrap
1502 assert_eq!(
1503 iterate_expanded_and_wrapped_usize_range(3..5, 4, 4, 8).collect::<Vec<usize>>(),
1504 (0..8).collect::<Vec<usize>>()
1505 );
1506 }
1507
1508 #[test]
1509 fn test_truncate_lines_to_byte_limit() {
1510 let text = "Line 1\nLine 2\nLine 3\nLine 4";
1511
1512 // Limit that includes all lines
1513 assert_eq!(truncate_lines_to_byte_limit(text, 100), text);
1514
1515 // Exactly the first line
1516 assert_eq!(truncate_lines_to_byte_limit(text, 7), "Line 1\n");
1517
1518 // Limit between lines
1519 assert_eq!(truncate_lines_to_byte_limit(text, 13), "Line 1\n");
1520 assert_eq!(truncate_lines_to_byte_limit(text, 20), "Line 1\nLine 2\n");
1521
1522 // Limit before first newline
1523 assert_eq!(truncate_lines_to_byte_limit(text, 6), "Line ");
1524
1525 // Test with non-ASCII characters
1526 let text_utf8 = "Line 1\nLíne 2\nLine 3";
1527 assert_eq!(
1528 truncate_lines_to_byte_limit(text_utf8, 15),
1529 "Line 1\nLíne 2\n"
1530 );
1531 }
1532
1533 #[test]
1534 fn test_string_size_with_expanded_tabs() {
1535 let nz = |val| NonZeroU32::new(val).unwrap();
1536 assert_eq!(char_len_with_expanded_tabs(0, "", nz(4)), 0);
1537 assert_eq!(char_len_with_expanded_tabs(0, "hello", nz(4)), 5);
1538 assert_eq!(char_len_with_expanded_tabs(0, "\thello", nz(4)), 9);
1539 assert_eq!(char_len_with_expanded_tabs(0, "abc\tab", nz(4)), 6);
1540 assert_eq!(char_len_with_expanded_tabs(0, "hello\t", nz(4)), 8);
1541 assert_eq!(char_len_with_expanded_tabs(0, "\t\t", nz(8)), 16);
1542 assert_eq!(char_len_with_expanded_tabs(0, "x\t", nz(8)), 8);
1543 assert_eq!(char_len_with_expanded_tabs(7, "x\t", nz(8)), 9);
1544 }
1545
1546 #[test]
1547 fn test_word_breaking_tokenizer() {
1548 let tests: &[(&str, &[WordBreakToken<'static>])] = &[
1549 ("", &[]),
1550 (" ", &[whitespace(" ", 2)]),
1551 ("Ʒ", &[word("Ʒ", 1)]),
1552 ("Ǽ", &[word("Ǽ", 1)]),
1553 ("⋑", &[word("⋑", 1)]),
1554 ("⋑⋑", &[word("⋑⋑", 2)]),
1555 (
1556 "原理,进而",
1557 &[word("原", 1), word("理,", 2), word("进", 1), word("而", 1)],
1558 ),
1559 (
1560 "hello world",
1561 &[word("hello", 5), whitespace(" ", 1), word("world", 5)],
1562 ),
1563 (
1564 "hello, world",
1565 &[word("hello,", 6), whitespace(" ", 1), word("world", 5)],
1566 ),
1567 (
1568 " hello world",
1569 &[
1570 whitespace(" ", 2),
1571 word("hello", 5),
1572 whitespace(" ", 1),
1573 word("world", 5),
1574 ],
1575 ),
1576 (
1577 "这是什么 \n 钢笔",
1578 &[
1579 word("这", 1),
1580 word("是", 1),
1581 word("什", 1),
1582 word("么", 1),
1583 whitespace(" ", 1),
1584 newline(),
1585 whitespace(" ", 1),
1586 word("钢", 1),
1587 word("笔", 1),
1588 ],
1589 ),
1590 (" mutton", &[whitespace(" ", 1), word("mutton", 6)]),
1591 ];
1592
1593 fn word(token: &'static str, grapheme_len: usize) -> WordBreakToken<'static> {
1594 WordBreakToken::Word {
1595 token,
1596 grapheme_len,
1597 }
1598 }
1599
1600 fn whitespace(token: &'static str, grapheme_len: usize) -> WordBreakToken<'static> {
1601 WordBreakToken::InlineWhitespace {
1602 token,
1603 grapheme_len,
1604 }
1605 }
1606
1607 fn newline() -> WordBreakToken<'static> {
1608 WordBreakToken::Newline
1609 }
1610
1611 for (input, result) in tests {
1612 assert_eq!(
1613 WordBreakingTokenizer::new(input)
1614 .collect::<Vec<_>>()
1615 .as_slice(),
1616 *result,
1617 );
1618 }
1619 }
1620
1621 #[test]
1622 fn test_wrap_with_prefix() {
1623 assert_eq!(
1624 wrap_with_prefix(
1625 "# ".to_string(),
1626 "abcdefg".to_string(),
1627 4,
1628 NonZeroU32::new(4).unwrap(),
1629 false,
1630 ),
1631 "# abcdefg"
1632 );
1633 assert_eq!(
1634 wrap_with_prefix(
1635 "".to_string(),
1636 "\thello world".to_string(),
1637 8,
1638 NonZeroU32::new(4).unwrap(),
1639 false,
1640 ),
1641 "hello\nworld"
1642 );
1643 assert_eq!(
1644 wrap_with_prefix(
1645 "// ".to_string(),
1646 "xx \nyy zz aa bb cc".to_string(),
1647 12,
1648 NonZeroU32::new(4).unwrap(),
1649 false,
1650 ),
1651 "// xx yy zz\n// aa bb cc"
1652 );
1653 assert_eq!(
1654 wrap_with_prefix(
1655 String::new(),
1656 "这是什么 \n 钢笔".to_string(),
1657 3,
1658 NonZeroU32::new(4).unwrap(),
1659 false,
1660 ),
1661 "这是什\n么 钢\n笔"
1662 );
1663 }
1664
1665 #[test]
1666 fn test_split_with_ranges() {
1667 let input = "hi";
1668 let result = split_str_with_ranges(input, |c| c == ' ');
1669
1670 assert_eq!(result.len(), 1);
1671 assert_eq!(result[0], (0..2, "hi"));
1672
1673 let input = "héllo🦀world";
1674 let result = split_str_with_ranges(input, |c| c == '🦀');
1675
1676 assert_eq!(result.len(), 2);
1677 assert_eq!(result[0], (0..6, "héllo")); // 'é' is 2 bytes
1678 assert_eq!(result[1], (10..15, "world")); // '🦀' is 4 bytes
1679 }
1680}