use super::metal_atlas::MetalAtlas;
use crate::{
    AtlasTextureId, Background, Bounds, ContentMask, DevicePixels, MonochromeSprite, PaintSurface,
    Path, Point, PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size,
    Surface, Underline, point, size,
};
use anyhow::Result;
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::{NSSize, NSUInteger},
    quartzcore::AutoresizingMask,
};

use core_foundation::base::TCFType;
use core_video::{
    metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
    pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
};
use foreign_types::{ForeignType, ForeignTypeRef};
use metal::{
    CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange,
    RenderPassColorAttachmentDescriptorRef,
};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;

use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to the Metal shaders.
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA; all devices support it.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;

pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    transparent: bool,
) -> Renderer {
    MetalRenderer::new(context, transparent)
}

pub(crate) struct InstanceBufferPool {
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}

impl Default for InstanceBufferPool {
    fn default() -> Self {
        Self {
            buffer_size: 2 * 1024 * 1024,
            buffers: Vec::new(),
        }
    }
}

pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    size: usize,
}

impl InstanceBufferPool {
    pub(crate) fn reset(&mut self, buffer_size: usize) {
        self.buffer_size = buffer_size;
        self.buffers.clear();
    }

    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
        let buffer = self.buffers.pop().unwrap_or_else(|| {
            device.new_buffer(
                self.buffer_size as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        InstanceBuffer {
            metal_buffer: buffer,
            size: self.buffer_size,
        }
    }

    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
        if buffer.size == self.buffer_size {
            self.buffers.push(buffer.metal_buffer)
        }
    }
}
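
// A minimal illustrative test for the buffer pool (an assumption of this
// edit, not part of the upstream suite): cached buffers are reused only while
// their size matches the pool's current `buffer_size`, and `reset` drops any
// stale buffers.
#[cfg(test)]
mod instance_buffer_pool_tests {
    use super::InstanceBufferPool;

    #[test]
    fn recycles_buffers_of_matching_size() {
        let Some(device) = metal::Device::system_default() else {
            return; // No Metal device available (e.g. CI without a GPU).
        };
        let mut pool = InstanceBufferPool::default();
        let buffer = pool.acquire(&device);
        assert_eq!(buffer.size, pool.buffer_size);
        pool.release(buffer);
        assert_eq!(pool.buffers.len(), 1);

        // Growing the pool discards cached buffers of the old size.
        let doubled = pool.buffer_size * 2;
        pool.reset(doubled);
        let buffer = pool.acquire(&device);
        pool.release(buffer);
        assert_eq!(pool.buffers.len(), 1);
    }
}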

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: core_video::metal_texture_cache::CVMetalTextureCache,
    path_intermediate_texture: Option<metal::Texture>,
    path_intermediate_msaa_texture: Option<metal::Texture>,
    path_sample_count: u32,
}
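
// This struct is `#[repr(C)]` so its layout matches the vertex struct that the
// path-rasterization Metal shader reads; `draw_paths_to_intermediate` below
// copies these vertices byte-for-byte into a GPU-visible instance buffer.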
#[repr(C)]
pub struct PathRasterizationVertex {
    pub xy_position: Point<ScaledPixels>,
    pub st_position: Point<f32>,
    pub color: Background,
    pub bounds: Bounds<ScaledPixels>,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>, transparent: bool) -> Self {
        // Prefer low-power integrated GPUs on Intel Macs. On Apple
        // Silicon, there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let device = if let Some(d) = metal::Device::all()
            .into_iter()
            .min_by_key(|d| (d.is_removable(), !d.is_low_power()))
        {
            d
        } else {
            // For some reason `all()` can return an empty list; see https://github.com/zed-industries/zed/issues/37689.
            // In that case, fall back to the system default device.
            log::error!(
                "Unable to enumerate Metal devices; attempting to use system default device"
            );
            metal::Device::system_default().unwrap_or_else(|| {
                log::error!("unable to access a compatible graphics device");
                std::process::exit(1);
            })
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        // Support direct-to-display rendering if the window is not transparent.
        // https://developer.apple.com/documentation/metal/managing-your-game-window-for-metal-in-macos
        layer.set_opaque(!transparent);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }
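
        // Packing example (illustrative): `to_float2_bits(point(1., 0.))`
        // yields 0x0000_0000_3F80_0000, with x's bits (1.0f32 = 0x3F80_0000)
        // in the low word and y's bits in the high word, matching Metal's
        // `float2` layout on little-endian hardware.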

        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::BGRA8Unorm,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_path_sprite_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            CVMetalTextureCache::new(None, device.clone(), None).unwrap();

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
            path_intermediate_texture: None,
            path_intermediate_msaa_texture: None,
            path_sample_count: PATH_SAMPLE_COUNT,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let size = NSSize {
            width: size.width.0 as f64,
            height: size.height.0 as f64,
        };
        unsafe {
            let _: () = msg_send![
                self.layer(),
                setDrawableSize: size
            ];
        }
        let device_pixels_size = Size {
            width: DevicePixels(size.width as i32),
            height: DevicePixels(size.height as i32),
        };
        self.update_path_intermediate_textures(device_pixels_size);
    }

    fn update_path_intermediate_textures(&mut self, size: Size<DevicePixels>) {
        // We're not certain when this happens, but the size can sometimes be zero here,
        // most likely before the first layout pass on window creation. Creating a
        // zero-sized texture causes a SIGABRT.
        // https://github.com/zed-industries/zed/issues/36229
        if size.width.0 <= 0 || size.height.0 <= 0 {
            self.path_intermediate_texture = None;
            self.path_intermediate_msaa_texture = None;
            return;
        }

        let texture_descriptor = metal::TextureDescriptor::new();
        texture_descriptor.set_width(size.width.0 as u64);
        texture_descriptor.set_height(size.height.0 as u64);
        texture_descriptor.set_pixel_format(metal::MTLPixelFormat::BGRA8Unorm);
        texture_descriptor
            .set_usage(metal::MTLTextureUsage::RenderTarget | metal::MTLTextureUsage::ShaderRead);
        self.path_intermediate_texture = Some(self.device.new_texture(&texture_descriptor));

        if self.path_sample_count > 1 {
            let mut msaa_descriptor = texture_descriptor;
            msaa_descriptor.set_texture_type(metal::MTLTextureType::D2Multisample);
            msaa_descriptor.set_storage_mode(metal::MTLStorageMode::Private);
            msaa_descriptor.set_sample_count(self.path_sample_count as _);
            self.path_intermediate_msaa_texture = Some(self.device.new_texture(&msaa_descriptor));
        } else {
            self.path_intermediate_msaa_texture = None;
        }
    }

    pub fn update_transparency(&self, transparent: bool) {
        self.layer.set_opaque(!transparent);
    }

    pub fn destroy(&self) {
        // Nothing to do.
    }

    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }
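
    // Worked growth example (illustrative): starting from the 2 MiB default,
    // a scene that keeps overflowing retries at 4 MiB, 8 MiB, and so on; the
    // loop above gives up once the buffer reaches the 256 MiB cap, i.e. after
    // at most seven doublings.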

    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        let mut instance_offset = 0;

        let mut command_encoder = new_command_encoder(
            command_buffer,
            drawable,
            viewport_size,
            |color_attachment| {
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
            },
        );

        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => {
                    command_encoder.end_encoding();

                    let did_draw = self.draw_paths_to_intermediate(
                        paths,
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_buffer,
                    );

                    command_encoder = new_command_encoder(
                        command_buffer,
                        drawable,
                        viewport_size,
                        |color_attachment| {
                            color_attachment.set_load_action(metal::MTLLoadAction::Load);
                        },
                    );

                    if did_draw {
                        self.draw_paths_from_intermediate(
                            paths,
                            instance_buffer,
                            &mut instance_offset,
                            viewport_size,
                            command_encoder,
                        )
                    } else {
                        false
                    }
                }
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };
            if !ok {
                command_encoder.end_encoding();
                anyhow::bail!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
            }
        }

        command_encoder.end_encoding();

        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }

    fn draw_paths_to_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_buffer: &metal::CommandBufferRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }
        let Some(intermediate_texture) = &self.path_intermediate_texture else {
            return false;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 0.));

        if let Some(msaa_texture) = &self.path_intermediate_msaa_texture {
            color_attachment.set_texture(Some(msaa_texture));
            color_attachment.set_resolve_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
        } else {
            color_attachment.set_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
        }

        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
        command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);

        align_offset(instance_offset);
        let mut vertices = Vec::new();
        for path in paths {
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds: path.bounds.intersect(&path.content_mask.bounds),
            }));
        }
        let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
        let next_offset = *instance_offset + vertices_bytes_len;
        if next_offset > instance_buffer.size {
            command_encoder.end_encoding();
            return false;
        }
        command_encoder.set_vertex_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            PathRasterizationInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                vertices.as_ptr() as *const u8,
                buffer_contents,
                vertices_bytes_len,
            );
        }
        command_encoder.draw_primitives(
            metal::MTLPrimitiveType::Triangle,
            0,
            vertices.len() as u64,
        );
        *instance_offset = next_offset;

        command_encoder.end_encoding();
        true
    }

    fn draw_shadows(
        &self,
        shadows: &[Shadow],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &self,
        quads: &[Quad],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        let Some(first_path) = paths.first() else {
            return true;
        };

        let Some(ref intermediate_texture) = self.path_intermediate_texture else {
            return false;
        };

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        command_encoder.set_fragment_texture(
            SpriteInputIndex::AtlasTexture as u64,
            Some(intermediate_texture),
        );

        // When copying paths from the intermediate texture to the drawable,
        // each pixel must be copied only once; otherwise translucent paths
        // would be blended twice.
        //
        // If all paths have the same draw order, their bounds are all
        // disjoint, so we can copy each path's bounds individually. If this
        // batch combines different draw orders, we perform a single copy
        // for a minimal spanning rect.
        let sprites;
        if paths.last().unwrap().order == first_path.order {
            sprites = paths
                .iter()
                .map(|path| PathSprite {
                    bounds: path.clipped_bounds(),
                })
                .collect();
        } else {
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            sprites = vec![PathSprite { bounds }];
        }
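
        // Worked example (illustrative): three paths sharing draw order 7
        // have pairwise-disjoint bounds, so each becomes its own sprite. If a
        // fourth path with order 8 overlapped them, per-path copies could
        // blend the same translucent pixel twice, hence the single union rect.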

        align_offset(instance_offset);
        let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;

        true
    }

    fn draw_underlines(
        &self,
        underlines: &[Underline],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_surfaces(
        &mut self,
        surfaces: &[PaintSurface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.get_width() as i32),
                DevicePixels::from(surface.image_buffer.get_height() as i32),
            );

            assert_eq!(
                surface.image_buffer.get_pixel_format(),
                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.get_width_of_plane(0),
                    surface.image_buffer.get_height_of_plane(0),
                    0,
                )
                .unwrap();
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.get_width_of_plane(1),
                    surface.image_buffer.get_height_of_plane(1),
                    1,
                )
                .unwrap();

            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });
            command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });

            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

fn new_command_encoder<'a>(
    command_buffer: &'a metal::CommandBufferRef,
    drawable: &'a metal::MetalDrawableRef,
    viewport_size: Size<DevicePixels>,
    configure_color_attachment: impl Fn(&RenderPassColorAttachmentDescriptorRef),
) -> &'a metal::RenderCommandEncoderRef {
    let render_pass_descriptor = metal::RenderPassDescriptor::new();
    let color_attachment = render_pass_descriptor
        .color_attachments()
        .object_at(0)
        .unwrap();
    color_attachment.set_texture(Some(drawable.texture()));
    color_attachment.set_store_action(metal::MTLStoreAction::Store);
    configure_color_attachment(color_attachment);

    let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
    command_encoder.set_viewport(metal::MTLViewport {
        originX: 0.0,
        originY: 0.0,
        width: i32::from(viewport_size.width) as f64,
        height: i32::from(viewport_size.height) as f64,
        znear: 0.0,
        zfar: 1.0,
    });
    command_encoder
}

fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_sprite_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
    path_sample_count: u32,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    if path_sample_count > 1 {
        descriptor.set_raster_sample_count(path_sample_count as _);
        descriptor.set_alpha_to_coverage_enabled(false);
    }
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Aligning offsets to multiples of 256 keeps Metal happy.
fn align_offset(offset: &mut usize) {
    *offset = (*offset).div_ceil(256) * 256;
}
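
// A small illustrative sanity check for the alignment helper (an assumption
// of this edit, not part of the upstream test suite).
#[cfg(test)]
mod align_offset_tests {
    use super::align_offset;

    #[test]
    fn rounds_up_to_the_next_multiple_of_256() {
        let mut offset = 0;
        align_offset(&mut offset);
        assert_eq!(offset, 0); // Already aligned.

        let mut offset = 1;
        align_offset(&mut offset);
        assert_eq!(offset, 256);

        let mut offset = 257;
        align_offset(&mut offset);
        assert_eq!(offset, 512);
    }
}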

#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    ViewportSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}