//! blade_belt.rs — a recycling belt of GPU staging buffers for uploads.

  1use blade_graphics as gpu;
  2use std::mem;
  3
/// A GPU buffer together with its allocated size, so it can be
/// matched against future allocation requests and recycled.
struct ReusableBuffer {
    // Raw buffer handle owned by this belt; freed in `BladeBelt::destroy`.
    raw: gpu::Buffer,
    // Total capacity of `raw` in bytes.
    size: u64,
}
  8
/// Configuration for a [`BladeBelt`].
pub struct BladeBeltDescriptor {
    // Memory type for every buffer the belt creates (e.g. upload/shared).
    pub memory: gpu::Memory,
    // Minimum size, in bytes, of each newly created buffer chunk;
    // larger single allocations get a chunk of exactly their size.
    pub min_chunk_size: u64,
    // Alignment, in bytes, applied to every sub-allocation offset.
    // Must be non-zero (asserted in `BladeBelt::new`).
    pub alignment: u64,
}
 14
/// A belt of buffers, used by the BladeAtlas to cheaply
/// find staging space for uploads.
///
/// Buffers cycle between two lists: `active` buffers are being
/// bump-allocated from; `flush` retires them into `buffers` tagged
/// with a sync point, and `alloc` recycles them once the GPU has
/// passed that sync point.
pub struct BladeBelt {
    // Immutable configuration supplied at construction.
    desc: BladeBeltDescriptor,
    // Retired buffers paired with the sync point after which they
    // are safe to reuse.
    buffers: Vec<(ReusableBuffer, gpu::SyncPoint)>,
    // In-use buffers paired with the current bump-allocation offset
    // (bytes already handed out from the front of the buffer).
    active: Vec<(ReusableBuffer, u64)>,
}
 22
 23impl BladeBelt {
 24    pub fn new(desc: BladeBeltDescriptor) -> Self {
 25        assert_ne!(desc.alignment, 0);
 26        Self {
 27            desc,
 28            buffers: Vec::new(),
 29            active: Vec::new(),
 30        }
 31    }
 32
 33    pub fn destroy(&mut self, gpu: &gpu::Context) {
 34        for (buffer, _) in self.buffers.drain(..) {
 35            gpu.destroy_buffer(buffer.raw);
 36        }
 37        for (buffer, _) in self.active.drain(..) {
 38            gpu.destroy_buffer(buffer.raw);
 39        }
 40    }
 41
 42    #[profiling::function]
 43    pub fn alloc(&mut self, size: u64, gpu: &gpu::Context) -> gpu::BufferPiece {
 44        for &mut (ref rb, ref mut offset) in self.active.iter_mut() {
 45            let aligned = offset.next_multiple_of(self.desc.alignment);
 46            if aligned + size <= rb.size {
 47                let piece = rb.raw.at(aligned);
 48                *offset = aligned + size;
 49                return piece;
 50            }
 51        }
 52
 53        let index_maybe = self
 54            .buffers
 55            .iter()
 56            .position(|(rb, sp)| size <= rb.size && gpu.wait_for(sp, 0));
 57        if let Some(index) = index_maybe {
 58            let (rb, _) = self.buffers.remove(index);
 59            let piece = rb.raw.into();
 60            self.active.push((rb, size));
 61            return piece;
 62        }
 63
 64        let chunk_index = self.buffers.len() + self.active.len();
 65        let chunk_size = size.max(self.desc.min_chunk_size);
 66        let chunk = gpu.create_buffer(gpu::BufferDesc {
 67            name: &format!("chunk-{}", chunk_index),
 68            size: chunk_size,
 69            memory: self.desc.memory,
 70        });
 71        let rb = ReusableBuffer {
 72            raw: chunk,
 73            size: chunk_size,
 74        };
 75        self.active.push((rb, size));
 76        chunk.into()
 77    }
 78
 79    // SAFETY: T should be zeroable and ordinary data, no references, pointers, cells or other complicated data type.
 80    pub unsafe fn alloc_data<T>(&mut self, data: &[T], gpu: &gpu::Context) -> gpu::BufferPiece {
 81        assert!(!data.is_empty());
 82        let type_alignment = mem::align_of::<T>() as u64;
 83        debug_assert_eq!(
 84            self.desc.alignment % type_alignment,
 85            0,
 86            "Type alignment {} is too big",
 87            type_alignment
 88        );
 89        let total_bytes = std::mem::size_of_val(data);
 90        let bp = self.alloc(total_bytes as u64, gpu);
 91        unsafe {
 92            std::ptr::copy_nonoverlapping(data.as_ptr() as *const u8, bp.data(), total_bytes);
 93        }
 94        bp
 95    }
 96
 97    pub fn flush(&mut self, sp: &gpu::SyncPoint) {
 98        self.buffers
 99            .extend(self.active.drain(..).map(|(rb, _)| (rb, sp.clone())));
100    }
101}