use crate::{
    platform::AtlasTextureList, AtlasKey, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds,
    DevicePixels, PlatformAtlas, Point, Size,
};
use anyhow::Result;
use blade_graphics as gpu;
use blade_util::{BufferBelt, BufferBeltDescriptor};
use collections::FxHashMap;
use etagere::BucketedAtlasAllocator;
use parking_lot::Mutex;
use std::{borrow::Cow, ops, sync::Arc};

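/// Format of the atlas textures that path rasterization renders into (see
/// `AtlasTextureKind::Path` below): a single half-float channel per pixel.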
pub(crate) const PATH_TEXTURE_FORMAT: gpu::TextureFormat = gpu::TextureFormat::R16Float;

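/// GPU texture atlas backed by `blade_graphics`, kept behind a mutex so it can
/// be shared across threads.
///
/// A sketch of the per-frame flow implied by the methods below; the encoder,
/// context, and submit call are assumptions about the surrounding renderer,
/// not part of this file:
///
/// ```ignore
/// atlas.before_frame(&mut encoder); // flush pending initializations and uploads
/// // ... encode draw calls that sample the atlas textures ...
/// let sync_point = gpu.submit(&mut encoder); // hypothetical submission
/// atlas.after_frame(&sync_point); // let the upload belt recycle staging memory
/// ```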
pub(crate) struct BladeAtlas(Mutex<BladeAtlasState>);

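/// A tile upload that has been staged in the upload belt and is waiting to be
/// copied into its atlas texture on the next flush.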
struct PendingUpload {
    id: AtlasTextureId,
    bounds: Bounds<DevicePixels>,
    data: gpu::BufferPiece,
}

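/// Interior state of [`BladeAtlas`]: the GPU context, a staging-buffer belt for
/// uploads, per-kind texture storage, the key-to-tile cache, and the queues of
/// textures to initialize and tiles to upload.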
struct BladeAtlasState {
    gpu: Arc<gpu::Context>,
    upload_belt: BufferBelt,
    storage: BladeAtlasStorage,
    tiles_by_key: FxHashMap<AtlasKey, AtlasTile>,
    initializations: Vec<AtlasTextureId>,
    uploads: Vec<PendingUpload>,
}

#[cfg(gles)]
unsafe impl Send for BladeAtlasState {}

impl BladeAtlasState {
    fn destroy(&mut self) {
        self.storage.destroy(&self.gpu);
        self.upload_belt.destroy(&self.gpu);
    }
}

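/// Size and raw view of an atlas texture, as needed to bind it for rendering.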
pub struct BladeTextureInfo {
    pub size: gpu::Extent,
    pub raw_view: gpu::TextureView,
}

impl BladeAtlas {
    pub(crate) fn new(gpu: &Arc<gpu::Context>) -> Self {
        BladeAtlas(Mutex::new(BladeAtlasState {
            gpu: Arc::clone(gpu),
            upload_belt: BufferBelt::new(BufferBeltDescriptor {
                memory: gpu::Memory::Upload,
                min_chunk_size: 0x10000,
                alignment: 64, // Vulkan `optimalBufferCopyOffsetAlignment` on Intel XE
            }),
            storage: BladeAtlasStorage::default(),
            tiles_by_key: Default::default(),
            initializations: Vec::new(),
            uploads: Vec::new(),
        }))
    }

    pub(crate) fn destroy(&self) {
        self.0.lock().destroy();
    }

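    /// Reset the allocators of all textures of the given kind, making their
    /// space available again; the GPU textures themselves are kept and reused.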
    pub(crate) fn clear_textures(&self, texture_kind: AtlasTextureKind) {
        let mut lock = self.0.lock();
        let textures = &mut lock.storage[texture_kind];
        for texture in textures.iter_mut() {
            texture.clear();
        }
    }

    /// Allocate a rectangle and make it available for rendering immediately (without waiting for `before_frame`).
    pub fn allocate_for_rendering(
        &self,
        size: Size<DevicePixels>,
        texture_kind: AtlasTextureKind,
        gpu_encoder: &mut gpu::CommandEncoder,
    ) -> AtlasTile {
        let mut lock = self.0.lock();
        let tile = lock.allocate(size, texture_kind);
        lock.flush_initializations(gpu_encoder);
        tile
    }

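    /// Flush pending texture initializations and tile uploads into the frame's
    /// command encoder, before any draws that sample the atlas are encoded.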
    pub fn before_frame(&self, gpu_encoder: &mut gpu::CommandEncoder) {
        let mut lock = self.0.lock();
        lock.flush(gpu_encoder);
    }

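    /// Hand the frame's sync point to the upload belt so its staging buffers
    /// can be recycled once the GPU has consumed them.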
    pub fn after_frame(&self, sync_point: &gpu::SyncPoint) {
        let mut lock = self.0.lock();
        lock.upload_belt.flush(sync_point);
    }

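    /// Look up the size and raw view of the texture identified by `id`.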
    pub fn get_texture_info(&self, id: AtlasTextureId) -> BladeTextureInfo {
        let lock = self.0.lock();
        let texture = &lock.storage[id];
        let size = texture.allocator.size();
        BladeTextureInfo {
            size: gpu::Extent {
                width: size.width as u32,
                height: size.height as u32,
                depth: 1,
            },
            raw_view: texture.raw_view,
        }
    }
}

impl PlatformAtlas for BladeAtlas {
    fn get_or_insert_with<'a>(
        &self,
        key: &AtlasKey,
        build: &mut dyn FnMut() -> Result<Option<(Size<DevicePixels>, Cow<'a, [u8]>)>>,
    ) -> Result<Option<AtlasTile>> {
        let mut lock = self.0.lock();
        if let Some(tile) = lock.tiles_by_key.get(key) {
            Ok(Some(tile.clone()))
        } else {
            profiling::scope!("new tile");
            let Some((size, bytes)) = build()? else {
                return Ok(None);
            };
            let tile = lock.allocate(size, key.texture_kind());
            lock.upload_texture(tile.texture_id, tile.bounds, &bytes);
            lock.tiles_by_key.insert(key.clone(), tile.clone());
            Ok(Some(tile))
        }
    }

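    /// Forget the tile for `key`; when that was the last live tile of its
    /// texture, destroy the texture and return its slot to the free list.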
    fn remove(&self, key: &AtlasKey) {
        let mut lock = self.0.lock();

        let Some(id) = lock.tiles_by_key.remove(key).map(|tile| tile.texture_id) else {
            return;
        };

        let Some(texture_slot) = lock.storage[id.kind].textures.get_mut(id.index as usize) else {
            return;
        };

        if let Some(mut texture) = texture_slot.take() {
            texture.decrement_ref_count();
            if texture.is_unreferenced() {
                lock.storage[id.kind]
                    .free_list
                    .push(texture.id.index as usize);
                texture.destroy(&lock.gpu);
            } else {
                *texture_slot = Some(texture);
            }
        }
    }
}

impl BladeAtlasState {
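    /// Find room for `size` in an existing texture of the requested kind,
    /// scanning the list in reverse; if none has space, create a new texture
    /// and allocate from it.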
    fn allocate(&mut self, size: Size<DevicePixels>, texture_kind: AtlasTextureKind) -> AtlasTile {
        {
            let textures = &mut self.storage[texture_kind];

            if let Some(tile) = textures
                .iter_mut()
                .rev()
                .find_map(|texture| texture.allocate(size))
            {
                return tile;
            }
        }

        let texture = self.push_texture(size, texture_kind);
        texture.allocate(size).unwrap()
    }

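    /// Create a GPU texture of at least `min_size` (and at least 1024×1024) for
    /// the given kind, store it in the first free slot (or append it), and queue
    /// it for initialization on the next flush.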
    fn push_texture(
        &mut self,
        min_size: Size<DevicePixels>,
        kind: AtlasTextureKind,
    ) -> &mut BladeAtlasTexture {
        const DEFAULT_ATLAS_SIZE: Size<DevicePixels> = Size {
            width: DevicePixels(1024),
            height: DevicePixels(1024),
        };

        let size = min_size.max(&DEFAULT_ATLAS_SIZE);
        let format;
        let usage;
        match kind {
            AtlasTextureKind::Monochrome => {
                format = gpu::TextureFormat::R8Unorm;
                usage = gpu::TextureUsage::COPY | gpu::TextureUsage::RESOURCE;
            }
            AtlasTextureKind::Polychrome => {
                format = gpu::TextureFormat::Bgra8UnormSrgb;
                usage = gpu::TextureUsage::COPY | gpu::TextureUsage::RESOURCE;
            }
            AtlasTextureKind::Path => {
                format = PATH_TEXTURE_FORMAT;
                usage = gpu::TextureUsage::COPY
                    | gpu::TextureUsage::RESOURCE
                    | gpu::TextureUsage::TARGET;
            }
        }

        let raw = self.gpu.create_texture(gpu::TextureDesc {
            name: "atlas",
            format,
            size: gpu::Extent {
                width: size.width.into(),
                height: size.height.into(),
                depth: 1,
            },
            array_layer_count: 1,
            mip_level_count: 1,
            sample_count: 1,
            dimension: gpu::TextureDimension::D2,
            usage,
        });
        let raw_view = self.gpu.create_texture_view(
            raw,
            gpu::TextureViewDesc {
                name: "",
                format,
                dimension: gpu::ViewDimension::D2,
                subresources: &Default::default(),
            },
        );

        let texture_list = &mut self.storage[kind];
        let index = texture_list.free_list.pop();

        let atlas_texture = BladeAtlasTexture {
            id: AtlasTextureId {
                index: index.unwrap_or(texture_list.textures.len()) as u32,
                kind,
            },
            allocator: etagere::BucketedAtlasAllocator::new(size.into()),
            format,
            raw,
            raw_view,
            live_atlas_keys: 0,
        };

        self.initializations.push(atlas_texture.id);

        if let Some(ix) = index {
            texture_list.textures[ix] = Some(atlas_texture);
            texture_list.textures.get_mut(ix).unwrap().as_mut().unwrap()
        } else {
            texture_list.textures.push(Some(atlas_texture));
            texture_list.textures.last_mut().unwrap().as_mut().unwrap()
        }
    }

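    /// Stage `bytes` in the upload belt and queue a copy into texture `id`
    /// covering `bounds`; the copy is recorded on the next flush.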
    fn upload_texture(&mut self, id: AtlasTextureId, bounds: Bounds<DevicePixels>, bytes: &[u8]) {
        let data = self.upload_belt.alloc_bytes(bytes, &self.gpu);
        self.uploads.push(PendingUpload { id, bounds, data });
    }

    fn flush_initializations(&mut self, encoder: &mut gpu::CommandEncoder) {
        for id in self.initializations.drain(..) {
            let texture = &self.storage[id];
            encoder.init_texture(texture.raw);
        }
    }

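    /// Record all pending texture initializations and buffer-to-texture copies
    /// into `encoder`, draining both queues.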
    fn flush(&mut self, encoder: &mut gpu::CommandEncoder) {
        self.flush_initializations(encoder);

        let mut transfers = encoder.transfer("atlas");
        for upload in self.uploads.drain(..) {
            let texture = &self.storage[upload.id];
            transfers.copy_buffer_to_texture(
                upload.data,
                upload.bounds.size.width.to_bytes(texture.bytes_per_pixel()),
                gpu::TexturePiece {
                    texture: texture.raw,
                    mip_level: 0,
                    array_layer: 0,
                    origin: [
                        upload.bounds.origin.x.into(),
                        upload.bounds.origin.y.into(),
                        0,
                    ],
                },
                gpu::Extent {
                    width: upload.bounds.size.width.into(),
                    height: upload.bounds.size.height.into(),
                    depth: 1,
                },
            );
        }
    }
}

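/// Per-kind lists of atlas textures, indexable by [`AtlasTextureKind`] (a whole
/// list) or [`AtlasTextureId`] (a single texture).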
#[derive(Default)]
struct BladeAtlasStorage {
    monochrome_textures: AtlasTextureList<BladeAtlasTexture>,
    polychrome_textures: AtlasTextureList<BladeAtlasTexture>,
    path_textures: AtlasTextureList<BladeAtlasTexture>,
}

impl ops::Index<AtlasTextureKind> for BladeAtlasStorage {
    type Output = AtlasTextureList<BladeAtlasTexture>;
    fn index(&self, kind: AtlasTextureKind) -> &Self::Output {
        match kind {
            crate::AtlasTextureKind::Monochrome => &self.monochrome_textures,
            crate::AtlasTextureKind::Polychrome => &self.polychrome_textures,
            crate::AtlasTextureKind::Path => &self.path_textures,
        }
    }
}

impl ops::IndexMut<AtlasTextureKind> for BladeAtlasStorage {
    fn index_mut(&mut self, kind: AtlasTextureKind) -> &mut Self::Output {
        match kind {
            crate::AtlasTextureKind::Monochrome => &mut self.monochrome_textures,
            crate::AtlasTextureKind::Polychrome => &mut self.polychrome_textures,
            crate::AtlasTextureKind::Path => &mut self.path_textures,
        }
    }
}

impl ops::Index<AtlasTextureId> for BladeAtlasStorage {
    type Output = BladeAtlasTexture;
    fn index(&self, id: AtlasTextureId) -> &Self::Output {
        let textures = match id.kind {
            crate::AtlasTextureKind::Monochrome => &self.monochrome_textures,
            crate::AtlasTextureKind::Polychrome => &self.polychrome_textures,
            crate::AtlasTextureKind::Path => &self.path_textures,
        };
        textures[id.index as usize].as_ref().unwrap()
    }
}

impl BladeAtlasStorage {
    fn destroy(&mut self, gpu: &gpu::Context) {
        for mut texture in self.monochrome_textures.drain().flatten() {
            texture.destroy(gpu);
        }
        for mut texture in self.polychrome_textures.drain().flatten() {
            texture.destroy(gpu);
        }
        for mut texture in self.path_textures.drain().flatten() {
            texture.destroy(gpu);
        }
    }
}

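/// One GPU texture together with the 2D allocator that packs tiles into it and
/// a count of the atlas keys that still reference those tiles.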
struct BladeAtlasTexture {
    id: AtlasTextureId,
    allocator: BucketedAtlasAllocator,
    raw: gpu::Texture,
    raw_view: gpu::TextureView,
    format: gpu::TextureFormat,
    live_atlas_keys: u32,
}

impl BladeAtlasTexture {
    fn clear(&mut self) {
        self.allocator.clear();
    }

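    /// Try to reserve a `size` rectangle in this texture; returns `None` when
    /// the allocator has no room left.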
    fn allocate(&mut self, size: Size<DevicePixels>) -> Option<AtlasTile> {
        let allocation = self.allocator.allocate(size.into())?;
        let tile = AtlasTile {
            texture_id: self.id,
            tile_id: allocation.id.into(),
            padding: 0,
            bounds: Bounds {
                origin: allocation.rectangle.min.into(),
                size,
            },
        };
        self.live_atlas_keys += 1;
        Some(tile)
    }

    fn destroy(&mut self, gpu: &gpu::Context) {
        gpu.destroy_texture(self.raw);
        gpu.destroy_texture_view(self.raw_view);
    }

    fn bytes_per_pixel(&self) -> u8 {
        self.format.block_info().size
    }

    fn decrement_ref_count(&mut self) {
        self.live_atlas_keys -= 1;
    }

    fn is_unreferenced(&mut self) -> bool {
        self.live_atlas_keys == 0
    }
}

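// Conversions between `etagere`'s integer geometry types and gpui's
// `DevicePixels`-based geometry.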
impl From<Size<DevicePixels>> for etagere::Size {
    fn from(size: Size<DevicePixels>) -> Self {
        etagere::Size::new(size.width.into(), size.height.into())
    }
}

impl From<etagere::Point> for Point<DevicePixels> {
    fn from(value: etagere::Point) -> Self {
        Point {
            x: DevicePixels::from(value.x),
            y: DevicePixels::from(value.y),
        }
    }
}

impl From<etagere::Size> for Size<DevicePixels> {
    fn from(size: etagere::Size) -> Self {
        Size {
            width: DevicePixels::from(size.width),
            height: DevicePixels::from(size.height),
        }
    }
}

impl From<etagere::Rectangle> for Bounds<DevicePixels> {
    fn from(rectangle: etagere::Rectangle) -> Self {
        Bounds {
            origin: rectangle.min.into(),
            size: rectangle.size().into(),
        }
    }
}