blade_belt.rs

use blade_graphics as gpu;
use std::mem;

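/// A raw GPU buffer together with its allocated size in bytes.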
struct ReusableBuffer {
    raw: gpu::Buffer,
    size: u64,
}

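/// Parameters for a BladeBelt: which memory type to allocate chunks from,
/// the minimum size of each new chunk, and the alignment applied to every
/// sub-allocation.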
pub struct BladeBeltDescriptor {
    pub memory: gpu::Memory,
    pub min_chunk_size: u64,
    pub alignment: u64,
}

/// A belt of buffers, used by the BladeAtlas to cheaply
/// find staging space for uploads.
pub struct BladeBelt {
    desc: BladeBeltDescriptor,
    buffers: Vec<(ReusableBuffer, gpu::SyncPoint)>,
    active: Vec<(ReusableBuffer, u64)>,
}

impl BladeBelt {
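    /// Create an empty belt. Panics if the requested alignment is zero.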
    pub fn new(desc: BladeBeltDescriptor) -> Self {
        assert_ne!(desc.alignment, 0);
        Self {
            desc,
            buffers: Vec::new(),
            active: Vec::new(),
        }
    }

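    /// Free every buffer owned by the belt, both retired and still active.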
    pub fn destroy(&mut self, gpu: &gpu::Context) {
        for (buffer, _) in self.buffers.drain(..) {
            gpu.destroy_buffer(buffer.raw);
        }
        for (buffer, _) in self.active.drain(..) {
            gpu.destroy_buffer(buffer.raw);
        }
    }

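    /// Return a piece of staging memory that is at least `size` bytes long,
    /// reusing existing buffers when possible.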
    pub fn alloc(&mut self, size: u64, gpu: &gpu::Context) -> gpu::BufferPiece {
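        // Try to sub-allocate from an already active buffer, bumping its
        // offset to the next aligned position.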
        for &mut (ref rb, ref mut offset) in self.active.iter_mut() {
            let aligned = offset.next_multiple_of(self.desc.alignment);
            if aligned + size <= rb.size {
                let piece = rb.raw.at(aligned);
                *offset = aligned + size;
                return piece;
            }
        }

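        // Otherwise look for a retired buffer that is large enough and whose
        // sync point has already been reached (`wait_for` with a zero timeout).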
        let index_maybe = self
            .buffers
            .iter()
            .position(|(rb, sp)| size <= rb.size && gpu.wait_for(sp, 0));
        if let Some(index) = index_maybe {
            let (rb, _) = self.buffers.remove(index);
            let piece = rb.raw.into();
            self.active.push((rb, size));
            return piece;
        }

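        // Nothing can be reused: create a fresh chunk of at least
        // `min_chunk_size` bytes and make it an active buffer.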
        let chunk_index = self.buffers.len() + self.active.len();
        let chunk_size = size.max(self.desc.min_chunk_size);
        let chunk = gpu.create_buffer(gpu::BufferDesc {
            name: &format!("chunk-{}", chunk_index),
            size: chunk_size,
            memory: self.desc.memory,
        });
        let rb = ReusableBuffer {
            raw: chunk,
            size: chunk_size,
        };
        self.active.push((rb, size));
        chunk.into()
    }

    //todo!(linux): enforce T: bytemuck::Zeroable
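    /// Copy `data` into freshly allocated staging space and return the
    /// corresponding buffer piece.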
    pub fn alloc_data<T>(&mut self, data: &[T], gpu: &gpu::Context) -> gpu::BufferPiece {
        assert!(!data.is_empty());
        let type_alignment = mem::align_of::<T>() as u64;
        debug_assert_eq!(
            self.desc.alignment % type_alignment,
            0,
            "Type alignment {} is too big",
            type_alignment
        );
        let total_bytes = mem::size_of_val(data);
        let bp = self.alloc(total_bytes as u64, gpu);
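        // Copy the raw bytes of `data` into the mapped memory of the piece.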
        unsafe {
            std::ptr::copy_nonoverlapping(data.as_ptr() as *const u8, bp.data(), total_bytes);
        }
        bp
    }

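    /// Retire all active buffers against `sp`; they become reusable once the
    /// GPU reaches that sync point.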
    pub fn flush(&mut self, sp: &gpu::SyncPoint) {
        self.buffers
            .extend(self.active.drain(..).map(|(rb, _)| (rb, sp.clone())));
    }
}
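
// Illustrative usage sketch: create the belt once, stage data while recording
// a frame, then flush against that frame's sync point. The descriptor values,
// `gpu::Memory::Shared`, and the way the sync point is obtained here are
// assumptions made for the example only.
//
//     let mut belt = BladeBelt::new(BladeBeltDescriptor {
//         memory: gpu::Memory::Shared,
//         min_chunk_size: 0x10000,
//         alignment: 4,
//     });
//     let piece = belt.alloc_data(&vertices, &gpu);
//     // ... record copies that read from `piece` ...
//     let sync_point = gpu.submit(&mut encoder);
//     belt.flush(&sync_point);
//     // at shutdown:
//     belt.destroy(&gpu);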