use super::metal_atlas::MetalAtlas;
use crate::{
    AtlasTextureId, Background, Bounds, ContentMask, DevicePixels, MonochromeSprite, PaintSurface,
    Path, Point, PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size,
    Underline, point, size,
};
use anyhow::Result;
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::{NSSize, NSUInteger},
    quartzcore::AutoresizingMask,
};

use core_foundation::base::TCFType;
use core_video::{
    metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
    pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
};
use foreign_types::{ForeignType, ForeignTypeRef};
use metal::{
    CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange,
    RenderPassColorAttachmentDescriptorRef,
};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;

use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to Metal
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA; all devices support it.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;

pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    transparent: bool,
) -> Renderer {
    MetalRenderer::new(context, transparent)
}

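/// A pool of instance buffers reused across frames. Buffers are handed out by
/// `acquire`, filled with per-frame instance data, and returned via `release`
/// once the GPU has finished with them. Buffers whose size no longer matches
/// the pool's current size (after a `reset`) are dropped instead of reused.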
pub(crate) struct InstanceBufferPool {
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}

impl Default for InstanceBufferPool {
    fn default() -> Self {
        Self {
            buffer_size: 2 * 1024 * 1024,
            buffers: Vec::new(),
        }
    }
}

pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    size: usize,
}

impl InstanceBufferPool {
    pub(crate) fn reset(&mut self, buffer_size: usize) {
        self.buffer_size = buffer_size;
        self.buffers.clear();
    }

    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
        let buffer = self.buffers.pop().unwrap_or_else(|| {
            device.new_buffer(
                self.buffer_size as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        InstanceBuffer {
            metal_buffer: buffer,
            size: self.buffer_size,
        }
    }

    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
        if buffer.size == self.buffer_size {
            self.buffers.push(buffer.metal_buffer)
        }
    }
}

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
    path_intermediate_texture: Option<metal::Texture>,
    path_intermediate_msaa_texture: Option<metal::Texture>,
    path_sample_count: u32,
}

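/// Vertex format consumed by the path rasterization shader. `#[repr(C)]`
/// keeps the field layout identical to the matching struct on the Metal side,
/// since these vertices are copied into the instance buffer byte-for-byte.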
#[repr(C)]
pub struct PathRasterizationVertex {
    pub xy_position: Point<ScaledPixels>,
    pub st_position: Point<f32>,
    pub color: Background,
    pub bounds: Bounds<ScaledPixels>,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>, transparent: bool) -> Self {
        // Prefer low-power integrated GPUs on Intel Macs. On Apple Silicon
        // there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let device = if let Some(d) = metal::Device::all()
            .into_iter()
            .min_by_key(|d| (d.is_removable(), !d.is_low_power()))
        {
            d
        } else {
            // For some reason `all()` can return an empty list, see https://github.com/zed-industries/zed/issues/37689
            // In that case, we fall back to the system default device.
            log::error!(
                "Unable to enumerate Metal devices; attempting to use system default device"
            );
            metal::Device::system_default().unwrap_or_else(|| {
                log::error!("unable to access a compatible graphics device");
                std::process::exit(1);
            })
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        // Support direct-to-display rendering if the window is not transparent
        // https://developer.apple.com/documentation/metal/managing-your-game-window-for-metal-in-macos
        layer.set_opaque(!transparent);
        layer.set_maximum_drawable_count(3);
        // We already present in sync with the display via the display link,
        // which allows using direct-to-display even in windowed mode.
        layer.set_display_sync_enabled(false);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

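        // Pack a point's two `f32` components into a single `u64`, with `x`
        // in the low 32 bits and `y` in the high 32 bits. On a little-endian
        // target this matches the in-memory layout of a Metal `float2`, so
        // the array below can be uploaded to the GPU verbatim.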
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

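        // Two triangles covering the unit square. Instanced draws of these
        // six vertices are scaled and positioned per primitive in the vertex
        // shaders to produce quads.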
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::BGRA8Unorm,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_path_sprite_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            CVMetalTextureCache::new(None, device.clone(), None).unwrap();

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
            path_intermediate_texture: None,
            path_intermediate_msaa_texture: None,
            path_sample_count: PATH_SAMPLE_COUNT,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let size = NSSize {
            width: size.width.0 as f64,
            height: size.height.0 as f64,
        };
        unsafe {
            let _: () = msg_send![
                self.layer(),
                setDrawableSize: size
            ];
        }
        let device_pixels_size = Size {
            width: DevicePixels(size.width as i32),
            height: DevicePixels(size.height as i32),
        };
        self.update_path_intermediate_textures(device_pixels_size);
    }

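    /// Recreates the offscreen textures that paths are rasterized into: a
    /// resolve texture that is later sampled when compositing paths onto the
    /// drawable, plus a multisampled render target when MSAA is enabled.
    /// Both are sized to match the drawable.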
    fn update_path_intermediate_textures(&mut self, size: Size<DevicePixels>) {
        // It's unclear exactly when this happens, but the size can sometimes be
        // zero here, most likely before the first layout pass on window creation.
        // Creating a zero-sized texture causes a SIGABRT.
        // https://github.com/zed-industries/zed/issues/36229
        if size.width.0 <= 0 || size.height.0 <= 0 {
            self.path_intermediate_texture = None;
            self.path_intermediate_msaa_texture = None;
            return;
        }

        let texture_descriptor = metal::TextureDescriptor::new();
        texture_descriptor.set_width(size.width.0 as u64);
        texture_descriptor.set_height(size.height.0 as u64);
        texture_descriptor.set_pixel_format(metal::MTLPixelFormat::BGRA8Unorm);
        texture_descriptor
            .set_usage(metal::MTLTextureUsage::RenderTarget | metal::MTLTextureUsage::ShaderRead);
        self.path_intermediate_texture = Some(self.device.new_texture(&texture_descriptor));

        if self.path_sample_count > 1 {
            let msaa_descriptor = texture_descriptor;
            msaa_descriptor.set_texture_type(metal::MTLTextureType::D2Multisample);
            msaa_descriptor.set_storage_mode(metal::MTLStorageMode::Private);
            msaa_descriptor.set_sample_count(self.path_sample_count as _);
            self.path_intermediate_msaa_texture = Some(self.device.new_texture(&msaa_descriptor));
        } else {
            self.path_intermediate_msaa_texture = None;
        }
    }

    pub fn update_transparency(&self, transparent: bool) {
        self.layer.set_opaque(!transparent);
    }

    pub fn destroy(&self) {
        // nothing to do
    }

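    /// Draws a frame. If the scene's instances don't fit in the pooled
    /// buffer, the pool is reset with a doubled buffer size (up to a 256 MiB
    /// cap) and the frame is re-encoded from scratch.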
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }

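    /// Encodes all batches in the scene into a single command buffer. Most
    /// batches go into one render pass targeting the drawable; path batches
    /// interrupt that pass, rasterize into the intermediate texture in a pass
    /// of their own, and are then composited by a fresh encoder that loads
    /// the drawable's existing contents.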
    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        let mut instance_offset = 0;

        let mut command_encoder = new_command_encoder(
            command_buffer,
            drawable,
            viewport_size,
            |color_attachment| {
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
            },
        );

        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => {
                    command_encoder.end_encoding();

                    let did_draw = self.draw_paths_to_intermediate(
                        paths,
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_buffer,
                    );

                    command_encoder = new_command_encoder(
                        command_buffer,
                        drawable,
                        viewport_size,
                        |color_attachment| {
                            color_attachment.set_load_action(metal::MTLLoadAction::Load);
                        },
                    );

                    if did_draw {
                        self.draw_paths_from_intermediate(
                            paths,
                            instance_buffer,
                            &mut instance_offset,
                            viewport_size,
                            command_encoder,
                        )
                    } else {
                        false
                    }
                }
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };
            if !ok {
                command_encoder.end_encoding();
                anyhow::bail!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
            }
        }

        command_encoder.end_encoding();

        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }

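    /// Rasterizes the given paths into the intermediate texture in a
    /// dedicated render pass. With MSAA enabled, the pass renders into the
    /// multisampled target and resolves into the intermediate texture.
    /// Returns false if the vertices don't fit in the instance buffer (or
    /// the intermediate texture is missing), signaling the caller to retry.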
    fn draw_paths_to_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_buffer: &metal::CommandBufferRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }
        let Some(intermediate_texture) = &self.path_intermediate_texture else {
            return false;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 0.));

        if let Some(msaa_texture) = &self.path_intermediate_msaa_texture {
            color_attachment.set_texture(Some(msaa_texture));
            color_attachment.set_resolve_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
        } else {
            color_attachment.set_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
        }

        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
        command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);

        align_offset(instance_offset);
        let mut vertices = Vec::new();
        for path in paths {
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds: path.bounds.intersect(&path.content_mask.bounds),
            }));
        }
        let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
        let next_offset = *instance_offset + vertices_bytes_len;
        if next_offset > instance_buffer.size {
            command_encoder.end_encoding();
            return false;
        }
        command_encoder.set_vertex_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            PathRasterizationInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                vertices.as_ptr() as *const u8,
                buffer_contents,
                vertices_bytes_len,
            );
        }
        command_encoder.draw_primitives(
            metal::MTLPrimitiveType::Triangle,
            0,
            vertices.len() as u64,
        );
        *instance_offset = next_offset;

        command_encoder.end_encoding();
        true
    }

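    // The per-primitive draw methods below share one pattern: align the write
    // offset, copy the instance slice into the shared instance buffer, bind
    // that buffer (plus viewport/texture metadata) to the encoder, and issue
    // an instanced draw of the six unit-quad vertices. Each returns false
    // when the instances don't fit, so the frame can be retried with a
    // larger buffer.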
    fn draw_shadows(
        &self,
        shadows: &[Shadow],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &self,
        quads: &[Quad],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

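    /// Composites previously rasterized paths from the intermediate texture
    /// onto the drawable by drawing one textured sprite per path (or a single
    /// spanning sprite; see the comment on draw order below).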
    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        let Some(first_path) = paths.first() else {
            return true;
        };

        let Some(ref intermediate_texture) = self.path_intermediate_texture else {
            return false;
        };

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        command_encoder.set_fragment_texture(
            SpriteInputIndex::AtlasTexture as u64,
            Some(intermediate_texture),
        );

        // When copying paths from the intermediate texture to the drawable,
        // each pixel must only be copied once, in case of transparent paths.
        //
        // If all paths have the same draw order, then their bounds are all
        // disjoint, so we can copy each path's bounds individually. If this
        // batch combines different draw orders, we perform a single copy
        // for a minimal spanning rect.
        let sprites;
        if paths.last().unwrap().order == first_path.order {
            sprites = paths
                .iter()
                .map(|path| PathSprite {
                    bounds: path.clipped_bounds(),
                })
                .collect();
        } else {
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            sprites = vec![PathSprite { bounds }];
        }

        align_offset(instance_offset);
        let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;

        true
    }

    fn draw_underlines(
        &self,
        underlines: &[Underline],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

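    /// Draws video surfaces. Each surface is a biplanar YCbCr pixel buffer
    /// (asserted below); its luma and chroma planes are wrapped as Metal
    /// textures through the CoreVideo texture cache and converted to RGB in
    /// the fragment shader.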
    fn draw_surfaces(
        &mut self,
        surfaces: &[PaintSurface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.get_width() as i32),
                DevicePixels::from(surface.image_buffer.get_height() as i32),
            );

            assert_eq!(
                surface.image_buffer.get_pixel_format(),
                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.get_width_of_plane(0),
                    surface.image_buffer.get_height_of_plane(0),
                    0,
                )
                .unwrap();
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.get_width_of_plane(1),
                    surface.image_buffer.get_height_of_plane(1),
                    1,
                )
                .unwrap();

            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<SurfaceBounds>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });
            command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });

            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

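/// Begins a render pass that targets the drawable's texture and hands the
/// color attachment to the caller, which decides whether to clear it or load
/// the existing contents (as when resuming after a path pass).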
fn new_command_encoder<'a>(
    command_buffer: &'a metal::CommandBufferRef,
    drawable: &'a metal::MetalDrawableRef,
    viewport_size: Size<DevicePixels>,
    configure_color_attachment: impl Fn(&RenderPassColorAttachmentDescriptorRef),
) -> &'a metal::RenderCommandEncoderRef {
    let render_pass_descriptor = metal::RenderPassDescriptor::new();
    let color_attachment = render_pass_descriptor
        .color_attachments()
        .object_at(0)
        .unwrap();
    color_attachment.set_texture(Some(drawable.texture()));
    color_attachment.set_store_action(metal::MTLStoreAction::Store);
    configure_color_attachment(color_attachment);

    let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
    command_encoder.set_viewport(metal::MTLViewport {
        originX: 0.0,
        originY: 0.0,
        width: i32::from(viewport_size.width) as f64,
        height: i32::from(viewport_size.height) as f64,
        znear: 0.0,
        zfar: 1.0,
    });
    command_encoder
}

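// The three pipeline builders below differ mainly in blend configuration:
// ordinary primitives blend straight alpha over the destination, while the
// two path pipelines use One/OneMinusSourceAlpha factors, treating their
// colors as premultiplied so coverage accumulated in the intermediate
// texture composites correctly.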
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_sprite_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
    path_sample_count: u32,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    if path_sample_count > 1 {
        descriptor.set_raster_sample_count(path_sample_count as _);
        descriptor.set_alpha_to_coverage_enabled(false);
    }
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Align to multiples of 256 to make Metal happy: buffer offsets bound to the
// encoder must be 256-byte aligned on macOS. For example, an offset of 257
// rounds up to 512.
fn align_offset(offset: &mut usize) {
    *offset = (*offset).div_ceil(256) * 256;
}

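// Argument-table indices shared with the shader source. These mirror the
// buffer/texture indices the corresponding shader functions expect, so the
// two sides must stay in sync.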
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    ViewportSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}