1use collections::HashMap;
2use fs::MTime;
3use std::{path::Path, sync::Arc};
4
/// Drain the backlog once it holds strictly more than this many files (see `needs_drain`).
const MAX_FILES_BEFORE_RESUMMARIZE: usize = 4;
/// Drain the backlog once its files total strictly more than this many bytes on disk.
const MAX_BYTES_BEFORE_RESUMMARIZE: u64 = 1_000_000; // 1 MB
7
/// A queue of files awaiting (re-)summarization, tracked with enough metadata
/// to decide cheaply when the backlog is worth draining.
#[derive(Default, Debug)]
pub struct SummaryBacklog {
    /// Key: path to a file that needs summarization, but that we haven't summarized yet. Value: that file's size on disk, in bytes, and its mtime.
    files: HashMap<Arc<Path>, (u64, Option<MTime>)>,
    /// Cache of the sum of all values in `files`, so we don't have to traverse the whole map to check if we're over the byte limit.
    ///
    /// Invariant: always equals the sum of the byte counts stored in `files`
    /// (maintained by `insert` and reset by `drain`).
    total_bytes: u64,
}
15
16impl SummaryBacklog {
17 /// Store the given path in the backlog, along with how many bytes are in it.
18 pub fn insert(&mut self, path: Arc<Path>, bytes_on_disk: u64, mtime: Option<MTime>) {
19 let (prev_bytes, _) = self
20 .files
21 .insert(path, (bytes_on_disk, mtime))
22 .unwrap_or_default(); // Default to 0 prev_bytes
23
24 // Update the cached total by subtracting out the old amount and adding the new one.
25 self.total_bytes = self.total_bytes - prev_bytes + bytes_on_disk;
26 }
27
28 /// Returns true if the total number of bytes in the backlog exceeds a predefined threshold.
29 pub fn needs_drain(&self) -> bool {
30 self.files.len() > MAX_FILES_BEFORE_RESUMMARIZE ||
31 // The whole purpose of the cached total_bytes is to make this comparison cheap.
32 // Otherwise we'd have to traverse the entire dictionary every time we wanted this answer.
33 self.total_bytes > MAX_BYTES_BEFORE_RESUMMARIZE
34 }
35
36 /// Remove all the entries in the backlog and return the file paths as an iterator.
37 #[allow(clippy::needless_lifetimes)] // Clippy thinks this 'a can be elided, but eliding it gives a compile error
38 pub fn drain<'a>(&'a mut self) -> impl Iterator<Item = (Arc<Path>, Option<MTime>)> + 'a {
39 self.total_bytes = 0;
40
41 self.files
42 .drain()
43 .map(|(path, (_size, mtime))| (path, mtime))
44 }
45
46 pub fn len(&self) -> usize {
47 self.files.len()
48 }
49}