// blade_belt.rs

 1use blade_graphics as gpu;
 2use std::mem;
 3
/// A GPU buffer paired with its allocated size, kept around so the belt
/// can hand it out again instead of destroying and recreating buffers.
struct ReusableBuffer {
    // Underlying buffer handle; destroyed only in `BladeBelt::destroy`.
    raw: gpu::Buffer,
    // Allocated capacity of `raw`, in bytes.
    size: u64,
}
 8
/// Construction parameters for a `BladeBelt`.
pub struct BladeBeltDescriptor {
    // Memory type used for every chunk buffer the belt creates.
    pub memory: gpu::Memory,
    // Lower bound on the size of a newly created chunk; requests larger
    // than this allocate a chunk of exactly the requested size.
    pub min_chunk_size: u64,
}
13
/// A belt of buffers, used by the BladeAtlas to cheaply
/// find staging space for uploads.
pub struct BladeBelt {
    // Parameters the belt was created with (memory type, min chunk size).
    desc: BladeBeltDescriptor,
    // Retired buffers, each paired with the sync point after which the
    // GPU is done with it and the buffer may be recycled.
    buffers: Vec<(ReusableBuffer, gpu::SyncPoint)>,
    // Buffers currently being filled, each paired with the byte offset
    // of the next free position within it.
    active: Vec<(ReusableBuffer, u64)>,
}
21
22impl BladeBelt {
23    pub fn new(desc: BladeBeltDescriptor) -> Self {
24        Self {
25            desc,
26            buffers: Vec::new(),
27            active: Vec::new(),
28        }
29    }
30
31    pub fn destroy(&mut self, gpu: &gpu::Context) {
32        for (buffer, _) in self.buffers.drain(..) {
33            gpu.destroy_buffer(buffer.raw);
34        }
35        for (buffer, _) in self.active.drain(..) {
36            gpu.destroy_buffer(buffer.raw);
37        }
38    }
39
40    pub fn alloc(&mut self, size: u64, gpu: &gpu::Context) -> gpu::BufferPiece {
41        for &mut (ref rb, ref mut offset) in self.active.iter_mut() {
42            if *offset + size <= rb.size {
43                let piece = rb.raw.at(*offset);
44                *offset += size;
45                return piece;
46            }
47        }
48
49        let index_maybe = self
50            .buffers
51            .iter()
52            .position(|&(ref rb, ref sp)| size <= rb.size && gpu.wait_for(sp, 0));
53        if let Some(index) = index_maybe {
54            let (rb, _) = self.buffers.remove(index);
55            let piece = rb.raw.into();
56            self.active.push((rb, size));
57            return piece;
58        }
59
60        let chunk_index = self.buffers.len() + self.active.len();
61        let chunk_size = size.max(self.desc.min_chunk_size);
62        let chunk = gpu.create_buffer(gpu::BufferDesc {
63            name: &format!("chunk-{}", chunk_index),
64            size: chunk_size,
65            memory: self.desc.memory,
66        });
67        let rb = ReusableBuffer {
68            raw: chunk,
69            size: chunk_size,
70        };
71        self.active.push((rb, size));
72        chunk.into()
73    }
74
75    //Note: assuming T: bytemuck::Zeroable
76    pub fn alloc_data<T>(&mut self, data: &[T], gpu: &gpu::Context) -> gpu::BufferPiece {
77        assert!(!data.is_empty());
78        let alignment = mem::align_of::<T>() as u64;
79        let total_bytes = data.len() * mem::size_of::<T>();
80        let mut bp = self.alloc(alignment + (total_bytes - 1) as u64, gpu);
81        let rem = bp.offset % alignment;
82        if rem != 0 {
83            bp.offset += alignment - rem;
84        }
85        unsafe {
86            std::ptr::copy_nonoverlapping(data.as_ptr() as *const u8, bp.data(), total_bytes);
87        }
88        bp
89    }
90
91    pub fn flush(&mut self, sp: &gpu::SyncPoint) {
92        self.buffers
93            .extend(self.active.drain(..).map(|(rb, _)| (rb, sp.clone())));
94    }
95}