1use crate::metal_atlas::MetalAtlas;
2use anyhow::Result;
3use block::ConcreteBlock;
4use cocoa::{
5 base::{NO, YES},
6 foundation::{NSSize, NSUInteger},
7 quartzcore::AutoresizingMask,
8};
9use gpui::{
10 AtlasTextureId, Background, Bounds, ContentMask, DevicePixels, MonochromeSprite, PaintSurface,
11 Path, Point, PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size,
12 Surface, Underline, point, size,
13};
14#[cfg(any(test, feature = "test-support"))]
15use image::RgbaImage;
16
17use core_foundation::base::TCFType;
18use core_video::{
19 metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
20 pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
21};
22use foreign_types::{ForeignType, ForeignTypeRef};
23use metal::{
24 CAMetalLayer, CommandQueue, MTLGPUFamily, MTLPixelFormat, MTLResourceOptions, NSRange,
25 RenderPassColorAttachmentDescriptorRef,
26};
27use objc::{self, msg_send, sel, sel_impl};
28use parking_lot::Mutex;
29
30use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};
31
// Exported to metal
pub(crate) type PointF = gpui::Point<f32>;

// Precompiled shader library emitted by the build script (default build path).
#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
// Stitched shader source compiled at runtime instead of a prebuilt metallib.
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA, all devices support it.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

// Platform-neutral aliases consumed by the windowing layer.
pub(crate) type Context = Arc<Mutex<InstanceBufferPool>>;
pub(crate) type Renderer = MetalRenderer;
45
/// Constructs the Metal renderer for a window.
///
/// The native window/view pointers and initial bounds are unused on this
/// backend; the caller wires up the renderer's `CAMetalLayer` separately.
///
/// # Safety
/// Declared `unsafe` for signature parity with other platform backends; this
/// implementation dereferences none of the raw pointers it receives.
pub(crate) unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: gpui::Size<f32>,
    transparent: bool,
) -> Renderer {
    MetalRenderer::new(context, transparent)
}
55
/// Pool of reusable Metal buffers, all allocated at `buffer_size` bytes.
pub(crate) struct InstanceBufferPool {
    // Size used for new allocations; buffers acquired at a different size are
    // dropped instead of being returned to the pool (see `release`).
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}
60
61impl Default for InstanceBufferPool {
62 fn default() -> Self {
63 Self {
64 buffer_size: 2 * 1024 * 1024,
65 buffers: Vec::new(),
66 }
67 }
68}
69
/// A buffer checked out of an [`InstanceBufferPool`], tagged with the pool's
/// buffer size at the time it was acquired.
pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    // Capacity in bytes; compared against the pool's current size on release.
    size: usize,
}
74
75impl InstanceBufferPool {
76 pub(crate) fn reset(&mut self, buffer_size: usize) {
77 self.buffer_size = buffer_size;
78 self.buffers.clear();
79 }
80
81 pub(crate) fn acquire(
82 &mut self,
83 device: &metal::Device,
84 unified_memory: bool,
85 ) -> InstanceBuffer {
86 let buffer = self.buffers.pop().unwrap_or_else(|| {
87 let options = if unified_memory {
88 MTLResourceOptions::StorageModeShared
89 // Buffers are write only which can benefit from the combined cache
90 // https://developer.apple.com/documentation/metal/mtlresourceoptions/cpucachemodewritecombined
91 | MTLResourceOptions::CPUCacheModeWriteCombined
92 } else {
93 MTLResourceOptions::StorageModeManaged
94 };
95
96 device.new_buffer(self.buffer_size as u64, options)
97 });
98 InstanceBuffer {
99 metal_buffer: buffer,
100 size: self.buffer_size,
101 }
102 }
103
104 pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
105 if buffer.size == self.buffer_size {
106 self.buffers.push(buffer.metal_buffer)
107 }
108 }
109}
110
/// GPU renderer backed by Metal; owns the `CAMetalLayer`, the command queue,
/// and one render pipeline per primitive kind.
pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    // Device is in the Apple GPU family: enables memoryless MSAA render
    // targets (see `update_path_intermediate_textures`).
    is_apple_gpu: bool,
    // CPU and GPU share memory: selects shared vs. managed buffer storage.
    is_unified_memory: bool,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    // Two triangles covering the unit square, instanced by every draw call.
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: core_video::metal_texture_cache::CVMetalTextureCache,
    // Offscreen target paths are rasterized into before compositing.
    path_intermediate_texture: Option<metal::Texture>,
    // MSAA companion of the intermediate texture; None when sample count is 1.
    path_intermediate_msaa_texture: Option<metal::Texture>,
    path_sample_count: u32,
}
135
/// Vertex layout consumed by the path rasterization shader.
/// `#[repr(C)]` keeps the field order/padding stable for the GPU side.
#[repr(C)]
pub struct PathRasterizationVertex {
    // Vertex position in scaled pixel coordinates.
    pub xy_position: Point<ScaledPixels>,
    // Parametric coordinate forwarded to the shader — semantics defined there.
    pub st_position: Point<f32>,
    pub color: Background,
    // Path bounds intersected with its content mask (see draw_paths_to_intermediate).
    pub bounds: Bounds<ScaledPixels>,
}
143
144impl MetalRenderer {
    /// Builds the renderer: selects a device, configures the `CAMetalLayer`,
    /// loads the shader library, and compiles all render pipelines up front.
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>, transparent: bool) -> Self {
        // Prefer low‐power integrated GPUs on Intel Mac. On Apple
        // Silicon, there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let device = if let Some(d) = metal::Device::all()
            .into_iter()
            .min_by_key(|d| (d.is_removable(), !d.is_low_power()))
        {
            d
        } else {
            // For some reason `all()` can return an empty list, see https://github.com/zed-industries/zed/issues/37689
            // In that case, we fall back to the system default device.
            log::error!(
                "Unable to enumerate Metal devices; attempting to use system default device"
            );
            metal::Device::system_default().unwrap_or_else(|| {
                // Without any compatible GPU there is nothing useful to do.
                log::error!("unable to access a compatible graphics device");
                std::process::exit(1);
            })
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        // Support direct-to-display rendering if the window is not transparent
        // https://developer.apple.com/documentation/metal/managing-your-game-window-for-metal-in-macos
        layer.set_opaque(!transparent);
        layer.set_maximum_drawable_count(3);
        // Allow texture reading for visual tests (captures screenshots without ScreenCaptureKit)
        #[cfg(any(test, feature = "test-support"))]
        layer.set_framebuffer_only(false);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        // Packs a float2 into one u64: y's bit pattern in the high word,
        // x's in the low word, matching the shader-side vertex layout.
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        // Shared memory can be used only if CPU and GPU share the same memory space.
        // https://developer.apple.com/documentation/metal/setting-resource-storage-modes
        let is_unified_memory = device.has_unified_memory();
        // Apple GPU families support memoryless textures, which can significantly reduce
        // memory usage by keeping render targets in on-chip tile memory instead of
        // allocating backing store in system memory.
        // https://developer.apple.com/documentation/metal/mtlgpufamily
        let is_apple_gpu = device.supports_family(MTLGPUFamily::Apple1);

        // Two triangles covering the unit square; instanced draws scale these.
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            if is_unified_memory {
                MTLResourceOptions::StorageModeShared
                    | MTLResourceOptions::CPUCacheModeWriteCombined
            } else {
                MTLResourceOptions::StorageModeManaged
            },
        );

        // One pipeline per primitive kind; all target the layer's BGRA8 format.
        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::BGRA8Unorm,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_path_sprite_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone(), is_apple_gpu));
        let core_video_texture_cache =
            CVMetalTextureCache::new(None, device.clone(), None).unwrap();

        Self {
            device,
            layer,
            presents_with_transaction: false,
            is_apple_gpu,
            is_unified_memory,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
            // Allocated lazily by `update_drawable_size` once a size is known.
            path_intermediate_texture: None,
            path_intermediate_msaa_texture: None,
            path_sample_count: PATH_SAMPLE_COUNT,
        }
    }
324
    /// Borrows the renderer's `CAMetalLayer`.
    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }
328
    /// Raw pointer to the `CAMetalLayer`, for handing to Objective-C APIs.
    /// The pointer is only valid while `self` is alive.
    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }
332
    /// Shared sprite atlas used for glyph/image uploads.
    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }
336
337 pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
338 self.presents_with_transaction = presents_with_transaction;
339 self.layer
340 .set_presents_with_transaction(presents_with_transaction);
341 }
342
343 pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
344 let size = NSSize {
345 width: size.width.0 as f64,
346 height: size.height.0 as f64,
347 };
348 unsafe {
349 let _: () = msg_send![
350 self.layer(),
351 setDrawableSize: size
352 ];
353 }
354 let device_pixels_size = Size {
355 width: DevicePixels(size.width as i32),
356 height: DevicePixels(size.height as i32),
357 };
358 self.update_path_intermediate_textures(device_pixels_size);
359 }
360
    /// (Re)allocates the offscreen texture paths are rasterized into, plus an
    /// MSAA companion texture when multisampling is enabled.
    fn update_path_intermediate_textures(&mut self, size: Size<DevicePixels>) {
        // We are uncertain when this happens, but sometimes size can be 0 here. Most likely before
        // the layout pass on window creation. Zero-sized texture creation causes SIGABRT.
        // https://github.com/zed-industries/zed/issues/36229
        if size.width.0 <= 0 || size.height.0 <= 0 {
            self.path_intermediate_texture = None;
            self.path_intermediate_msaa_texture = None;
            return;
        }

        let texture_descriptor = metal::TextureDescriptor::new();
        texture_descriptor.set_width(size.width.0 as u64);
        texture_descriptor.set_height(size.height.0 as u64);
        texture_descriptor.set_pixel_format(metal::MTLPixelFormat::BGRA8Unorm);
        texture_descriptor.set_storage_mode(metal::MTLStorageMode::Private);
        // Rendered to by the path pass, sampled by the composite pass.
        texture_descriptor
            .set_usage(metal::MTLTextureUsage::RenderTarget | metal::MTLTextureUsage::ShaderRead);
        self.path_intermediate_texture = Some(self.device.new_texture(&texture_descriptor));

        if self.path_sample_count > 1 {
            // https://developer.apple.com/documentation/metal/choosing-a-resource-storage-mode-for-apple-gpus
            // Rendering MSAA textures are done in a single pass, so we can use memory-less storage on Apple Silicon
            let storage_mode = if self.is_apple_gpu {
                metal::MTLStorageMode::Memoryless
            } else {
                metal::MTLStorageMode::Private
            };

            // NOTE: this rebinds (and mutates) the same descriptor object that
            // was used above — the non-MSAA texture has already been created.
            let msaa_descriptor = texture_descriptor;
            msaa_descriptor.set_texture_type(metal::MTLTextureType::D2Multisample);
            msaa_descriptor.set_storage_mode(storage_mode);
            msaa_descriptor.set_sample_count(self.path_sample_count as _);
            self.path_intermediate_msaa_texture = Some(self.device.new_texture(&msaa_descriptor));
        } else {
            self.path_intermediate_msaa_texture = None;
        }
    }
398
    /// Toggles layer opacity; an opaque layer permits direct-to-display rendering.
    pub fn update_transparency(&self, transparent: bool) {
        self.layer.set_opaque(!transparent);
    }
402
    /// Tears down the renderer. All resources are released by `Drop`, so this
    /// is a no-op kept for parity with other platform backends.
    pub fn destroy(&self) {
        // nothing to do
    }
406
    /// Renders `scene` into the layer's next drawable and presents it.
    ///
    /// If encoding fails because the instance buffer is too small, the pool is
    /// reset with doubled capacity and the frame is retried, up to a 256 MiB
    /// ceiling (after which the frame is dropped).
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

        loop {
            let mut instance_buffer = self
                .instance_buffer_pool
                .lock()
                .acquire(&self.device, self.is_unified_memory);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    // Return the buffer to the pool only after the GPU is done
                    // with it. `Cell` lets the Fn-style completion block move
                    // the buffer out exactly once.
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    // Copy to the heap so ObjC can retain the block.
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        // Present from the CPU after the work is scheduled so
                        // the frame lands inside the Core Animation transaction.
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        // Drop the frame rather than growing without bound.
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }
475
    /// Renders the scene to a texture and returns the pixel data as an RGBA image.
    /// This does not present the frame to screen - useful for visual testing
    /// where we want to capture what would be rendered without displaying it.
    ///
    /// Requires `framebuffer_only = false` on the layer (set in `new` under
    /// the same cfg) so the drawable texture can be read back with `get_bytes`.
    #[cfg(any(test, feature = "test-support"))]
    pub fn render_to_image(&mut self, scene: &Scene) -> Result<RgbaImage> {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = layer
            .next_drawable()
            .ok_or_else(|| anyhow::anyhow!("Failed to get drawable for render_to_image"))?;

        // Same grow-and-retry loop as `draw`, but failures are returned as
        // errors instead of dropping the frame.
        loop {
            let mut instance_buffer = self
                .instance_buffer_pool
                .lock()
                .acquire(&self.device, self.is_unified_memory);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    // Release the instance buffer back to the pool once the
                    // GPU has finished with it.
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    // Commit and wait for completion without presenting
                    command_buffer.commit();
                    command_buffer.wait_until_completed();

                    // Read pixels from the texture
                    let texture = drawable.texture();
                    let width = texture.width() as u32;
                    let height = texture.height() as u32;
                    // Tightly packed rows: 4 bytes per BGRA8 pixel.
                    let bytes_per_row = width as usize * 4;
                    let buffer_size = height as usize * bytes_per_row;

                    let mut pixels = vec![0u8; buffer_size];

                    let region = metal::MTLRegion {
                        origin: metal::MTLOrigin { x: 0, y: 0, z: 0 },
                        size: metal::MTLSize {
                            width: width as u64,
                            height: height as u64,
                            depth: 1,
                        },
                    };

                    texture.get_bytes(
                        pixels.as_mut_ptr() as *mut std::ffi::c_void,
                        bytes_per_row as u64,
                        region,
                        0,
                    );

                    // Convert BGRA to RGBA (swap B and R channels)
                    for chunk in pixels.chunks_exact_mut(4) {
                        chunk.swap(0, 2);
                    }

                    return RgbaImage::from_raw(width, height, pixels).ok_or_else(|| {
                        anyhow::anyhow!("Failed to create RgbaImage from pixel data")
                    });
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        anyhow::bail!("instance buffer size grew too large: {}", buffer_size);
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }
569
    /// Encodes the whole scene into a new command buffer and returns it
    /// un-committed. Errors when any batch overflows the instance buffer,
    /// letting the caller retry with a larger buffer.
    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        // Transparent windows clear to fully transparent black.
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        // Byte offset into `instance_buffer` shared by all batch encoders.
        let mut instance_offset = 0;

        let mut command_encoder = new_command_encoder(
            command_buffer,
            drawable,
            viewport_size,
            |color_attachment| {
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
            },
        );

        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(range) => self.draw_shadows(
                    &scene.shadows[range],
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(range) => self.draw_quads(
                    &scene.quads[range],
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(range) => {
                    let paths = &scene.paths[range];
                    // Paths rasterize into the intermediate texture in their
                    // own render pass, so the drawable's encoder must end
                    // here and be re-created afterwards — loading (not
                    // clearing) the contents rendered so far.
                    command_encoder.end_encoding();

                    let did_draw = self.draw_paths_to_intermediate(
                        paths,
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_buffer,
                    );

                    command_encoder = new_command_encoder(
                        command_buffer,
                        drawable,
                        viewport_size,
                        |color_attachment| {
                            color_attachment.set_load_action(metal::MTLLoadAction::Load);
                        },
                    );

                    if did_draw {
                        self.draw_paths_from_intermediate(
                            paths,
                            instance_buffer,
                            &mut instance_offset,
                            viewport_size,
                            command_encoder,
                        )
                    } else {
                        false
                    }
                }
                PrimitiveBatch::Underlines(range) => self.draw_underlines(
                    &scene.underlines[range],
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                    .draw_monochrome_sprites(
                        texture_id,
                        &scene.monochrome_sprites[range],
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    ),
                PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                    .draw_polychrome_sprites(
                        texture_id,
                        &scene.polychrome_sprites[range],
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    ),
                PrimitiveBatch::Surfaces(range) => self.draw_surfaces(
                    &scene.surfaces[range],
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                // Not produced on this backend.
                PrimitiveBatch::SubpixelSprites { .. } => unreachable!(),
            };
            if !ok {
                // A batch overflowed the instance buffer: end encoding so the
                // command buffer is dropped cleanly, then report the failure.
                command_encoder.end_encoding();
                anyhow::bail!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
            }
        }

        command_encoder.end_encoding();

        if !self.is_unified_memory {
            // Sync the instance buffer to the GPU
            instance_buffer.metal_buffer.did_modify_range(NSRange {
                location: 0,
                length: instance_offset as NSUInteger,
            });
        }

        Ok(command_buffer.to_owned())
    }
702
    /// Rasterizes `paths` into the intermediate texture (resolving MSAA when
    /// enabled) in a dedicated render pass on `command_buffer`.
    ///
    /// Returns false if the intermediate texture is missing or the vertices
    /// don't fit in the instance buffer.
    fn draw_paths_to_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_buffer: &metal::CommandBufferRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }
        let Some(intermediate_texture) = &self.path_intermediate_texture else {
            // No texture allocated yet (e.g. zero-sized window).
            return false;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 0.));

        if let Some(msaa_texture) = &self.path_intermediate_msaa_texture {
            // Render multisampled and resolve into the readable texture.
            color_attachment.set_texture(Some(msaa_texture));
            color_attachment.set_resolve_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
        } else {
            color_attachment.set_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
        }

        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
        command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);

        align_offset(instance_offset);
        // Flatten all paths into one vertex list, baking per-path color and
        // mask-clipped bounds into each vertex.
        let mut vertices = Vec::new();
        for path in paths {
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds: path.bounds.intersect(&path.content_mask.bounds),
            }));
        }
        let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
        let next_offset = *instance_offset + vertices_bytes_len;
        if next_offset > instance_buffer.size {
            // Must end encoding before bailing so the pass is well-formed.
            command_encoder.end_encoding();
            return false;
        }
        command_encoder.set_vertex_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            PathRasterizationInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        // SAFETY: next_offset <= instance_buffer.size, so the copied range
        // stays inside the Metal buffer's allocation.
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                vertices.as_ptr() as *const u8,
                buffer_contents,
                vertices_bytes_len,
            );
        }
        command_encoder.draw_primitives(
            metal::MTLPrimitiveType::Triangle,
            0,
            vertices.len() as u64,
        );
        *instance_offset = next_offset;

        command_encoder.end_encoding();
        true
    }
788
789 fn draw_shadows(
790 &self,
791 shadows: &[Shadow],
792 instance_buffer: &mut InstanceBuffer,
793 instance_offset: &mut usize,
794 viewport_size: Size<DevicePixels>,
795 command_encoder: &metal::RenderCommandEncoderRef,
796 ) -> bool {
797 if shadows.is_empty() {
798 return true;
799 }
800 align_offset(instance_offset);
801
802 command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
803 command_encoder.set_vertex_buffer(
804 ShadowInputIndex::Vertices as u64,
805 Some(&self.unit_vertices),
806 0,
807 );
808 command_encoder.set_vertex_buffer(
809 ShadowInputIndex::Shadows as u64,
810 Some(&instance_buffer.metal_buffer),
811 *instance_offset as u64,
812 );
813 command_encoder.set_fragment_buffer(
814 ShadowInputIndex::Shadows as u64,
815 Some(&instance_buffer.metal_buffer),
816 *instance_offset as u64,
817 );
818
819 command_encoder.set_vertex_bytes(
820 ShadowInputIndex::ViewportSize as u64,
821 mem::size_of_val(&viewport_size) as u64,
822 &viewport_size as *const Size<DevicePixels> as *const _,
823 );
824
825 let shadow_bytes_len = mem::size_of_val(shadows);
826 let buffer_contents =
827 unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
828
829 let next_offset = *instance_offset + shadow_bytes_len;
830 if next_offset > instance_buffer.size {
831 return false;
832 }
833
834 unsafe {
835 ptr::copy_nonoverlapping(
836 shadows.as_ptr() as *const u8,
837 buffer_contents,
838 shadow_bytes_len,
839 );
840 }
841
842 command_encoder.draw_primitives_instanced(
843 metal::MTLPrimitiveType::Triangle,
844 0,
845 6,
846 shadows.len() as u64,
847 );
848 *instance_offset = next_offset;
849 true
850 }
851
852 fn draw_quads(
853 &self,
854 quads: &[Quad],
855 instance_buffer: &mut InstanceBuffer,
856 instance_offset: &mut usize,
857 viewport_size: Size<DevicePixels>,
858 command_encoder: &metal::RenderCommandEncoderRef,
859 ) -> bool {
860 if quads.is_empty() {
861 return true;
862 }
863 align_offset(instance_offset);
864
865 command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
866 command_encoder.set_vertex_buffer(
867 QuadInputIndex::Vertices as u64,
868 Some(&self.unit_vertices),
869 0,
870 );
871 command_encoder.set_vertex_buffer(
872 QuadInputIndex::Quads as u64,
873 Some(&instance_buffer.metal_buffer),
874 *instance_offset as u64,
875 );
876 command_encoder.set_fragment_buffer(
877 QuadInputIndex::Quads as u64,
878 Some(&instance_buffer.metal_buffer),
879 *instance_offset as u64,
880 );
881
882 command_encoder.set_vertex_bytes(
883 QuadInputIndex::ViewportSize as u64,
884 mem::size_of_val(&viewport_size) as u64,
885 &viewport_size as *const Size<DevicePixels> as *const _,
886 );
887
888 let quad_bytes_len = mem::size_of_val(quads);
889 let buffer_contents =
890 unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
891
892 let next_offset = *instance_offset + quad_bytes_len;
893 if next_offset > instance_buffer.size {
894 return false;
895 }
896
897 unsafe {
898 ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
899 }
900
901 command_encoder.draw_primitives_instanced(
902 metal::MTLPrimitiveType::Triangle,
903 0,
904 6,
905 quads.len() as u64,
906 );
907 *instance_offset = next_offset;
908 true
909 }
910
    /// Composites previously rasterized paths from the intermediate texture
    /// onto the drawable, as one sprite per path (or one spanning sprite).
    ///
    /// Returns false if the intermediate texture is missing or the sprites
    /// don't fit in the instance buffer.
    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        let Some(first_path) = paths.first() else {
            return true;
        };

        let Some(ref intermediate_texture) = self.path_intermediate_texture else {
            return false;
        };

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        // The intermediate texture stands in for the atlas texture slot.
        command_encoder.set_fragment_texture(
            SpriteInputIndex::AtlasTexture as u64,
            Some(intermediate_texture),
        );

        // When copying paths from the intermediate texture to the drawable,
        // each pixel must only be copied once, in case of transparent paths.
        //
        // If all paths have the same draw order, then their bounds are all
        // disjoint, so we can copy each path's bounds individually. If this
        // batch combines different draw orders, we perform a single copy
        // for a minimal spanning rect.
        let sprites;
        if paths.last().unwrap().order == first_path.order {
            sprites = paths
                .iter()
                .map(|path| PathSprite {
                    bounds: path.clipped_bounds(),
                })
                .collect();
        } else {
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            sprites = vec![PathSprite { bounds }];
        }

        align_offset(instance_offset);
        let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        // SAFETY: next_offset <= instance_buffer.size, so the copied range
        // stays inside the Metal buffer's allocation.
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;

        true
    }
1000
1001 fn draw_underlines(
1002 &self,
1003 underlines: &[Underline],
1004 instance_buffer: &mut InstanceBuffer,
1005 instance_offset: &mut usize,
1006 viewport_size: Size<DevicePixels>,
1007 command_encoder: &metal::RenderCommandEncoderRef,
1008 ) -> bool {
1009 if underlines.is_empty() {
1010 return true;
1011 }
1012 align_offset(instance_offset);
1013
1014 command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
1015 command_encoder.set_vertex_buffer(
1016 UnderlineInputIndex::Vertices as u64,
1017 Some(&self.unit_vertices),
1018 0,
1019 );
1020 command_encoder.set_vertex_buffer(
1021 UnderlineInputIndex::Underlines as u64,
1022 Some(&instance_buffer.metal_buffer),
1023 *instance_offset as u64,
1024 );
1025 command_encoder.set_fragment_buffer(
1026 UnderlineInputIndex::Underlines as u64,
1027 Some(&instance_buffer.metal_buffer),
1028 *instance_offset as u64,
1029 );
1030
1031 command_encoder.set_vertex_bytes(
1032 UnderlineInputIndex::ViewportSize as u64,
1033 mem::size_of_val(&viewport_size) as u64,
1034 &viewport_size as *const Size<DevicePixels> as *const _,
1035 );
1036
1037 let underline_bytes_len = mem::size_of_val(underlines);
1038 let buffer_contents =
1039 unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
1040
1041 let next_offset = *instance_offset + underline_bytes_len;
1042 if next_offset > instance_buffer.size {
1043 return false;
1044 }
1045
1046 unsafe {
1047 ptr::copy_nonoverlapping(
1048 underlines.as_ptr() as *const u8,
1049 buffer_contents,
1050 underline_bytes_len,
1051 );
1052 }
1053
1054 command_encoder.draw_primitives_instanced(
1055 metal::MTLPrimitiveType::Triangle,
1056 0,
1057 6,
1058 underlines.len() as u64,
1059 );
1060 *instance_offset = next_offset;
1061 true
1062 }
1063
1064 fn draw_monochrome_sprites(
1065 &self,
1066 texture_id: AtlasTextureId,
1067 sprites: &[MonochromeSprite],
1068 instance_buffer: &mut InstanceBuffer,
1069 instance_offset: &mut usize,
1070 viewport_size: Size<DevicePixels>,
1071 command_encoder: &metal::RenderCommandEncoderRef,
1072 ) -> bool {
1073 if sprites.is_empty() {
1074 return true;
1075 }
1076 align_offset(instance_offset);
1077
1078 let sprite_bytes_len = mem::size_of_val(sprites);
1079 let buffer_contents =
1080 unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
1081
1082 let next_offset = *instance_offset + sprite_bytes_len;
1083 if next_offset > instance_buffer.size {
1084 return false;
1085 }
1086
1087 let texture = self.sprite_atlas.metal_texture(texture_id);
1088 let texture_size = size(
1089 DevicePixels(texture.width() as i32),
1090 DevicePixels(texture.height() as i32),
1091 );
1092 command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
1093 command_encoder.set_vertex_buffer(
1094 SpriteInputIndex::Vertices as u64,
1095 Some(&self.unit_vertices),
1096 0,
1097 );
1098 command_encoder.set_vertex_buffer(
1099 SpriteInputIndex::Sprites as u64,
1100 Some(&instance_buffer.metal_buffer),
1101 *instance_offset as u64,
1102 );
1103 command_encoder.set_vertex_bytes(
1104 SpriteInputIndex::ViewportSize as u64,
1105 mem::size_of_val(&viewport_size) as u64,
1106 &viewport_size as *const Size<DevicePixels> as *const _,
1107 );
1108 command_encoder.set_vertex_bytes(
1109 SpriteInputIndex::AtlasTextureSize as u64,
1110 mem::size_of_val(&texture_size) as u64,
1111 &texture_size as *const Size<DevicePixels> as *const _,
1112 );
1113 command_encoder.set_fragment_buffer(
1114 SpriteInputIndex::Sprites as u64,
1115 Some(&instance_buffer.metal_buffer),
1116 *instance_offset as u64,
1117 );
1118 command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
1119
1120 unsafe {
1121 ptr::copy_nonoverlapping(
1122 sprites.as_ptr() as *const u8,
1123 buffer_contents,
1124 sprite_bytes_len,
1125 );
1126 }
1127
1128 command_encoder.draw_primitives_instanced(
1129 metal::MTLPrimitiveType::Triangle,
1130 0,
1131 6,
1132 sprites.len() as u64,
1133 );
1134 *instance_offset = next_offset;
1135 true
1136 }
1137
1138 fn draw_polychrome_sprites(
1139 &self,
1140 texture_id: AtlasTextureId,
1141 sprites: &[PolychromeSprite],
1142 instance_buffer: &mut InstanceBuffer,
1143 instance_offset: &mut usize,
1144 viewport_size: Size<DevicePixels>,
1145 command_encoder: &metal::RenderCommandEncoderRef,
1146 ) -> bool {
1147 if sprites.is_empty() {
1148 return true;
1149 }
1150 align_offset(instance_offset);
1151
1152 let texture = self.sprite_atlas.metal_texture(texture_id);
1153 let texture_size = size(
1154 DevicePixels(texture.width() as i32),
1155 DevicePixels(texture.height() as i32),
1156 );
1157 command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
1158 command_encoder.set_vertex_buffer(
1159 SpriteInputIndex::Vertices as u64,
1160 Some(&self.unit_vertices),
1161 0,
1162 );
1163 command_encoder.set_vertex_buffer(
1164 SpriteInputIndex::Sprites as u64,
1165 Some(&instance_buffer.metal_buffer),
1166 *instance_offset as u64,
1167 );
1168 command_encoder.set_vertex_bytes(
1169 SpriteInputIndex::ViewportSize as u64,
1170 mem::size_of_val(&viewport_size) as u64,
1171 &viewport_size as *const Size<DevicePixels> as *const _,
1172 );
1173 command_encoder.set_vertex_bytes(
1174 SpriteInputIndex::AtlasTextureSize as u64,
1175 mem::size_of_val(&texture_size) as u64,
1176 &texture_size as *const Size<DevicePixels> as *const _,
1177 );
1178 command_encoder.set_fragment_buffer(
1179 SpriteInputIndex::Sprites as u64,
1180 Some(&instance_buffer.metal_buffer),
1181 *instance_offset as u64,
1182 );
1183 command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
1184
1185 let sprite_bytes_len = mem::size_of_val(sprites);
1186 let buffer_contents =
1187 unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
1188
1189 let next_offset = *instance_offset + sprite_bytes_len;
1190 if next_offset > instance_buffer.size {
1191 return false;
1192 }
1193
1194 unsafe {
1195 ptr::copy_nonoverlapping(
1196 sprites.as_ptr() as *const u8,
1197 buffer_contents,
1198 sprite_bytes_len,
1199 );
1200 }
1201
1202 command_encoder.draw_primitives_instanced(
1203 metal::MTLPrimitiveType::Triangle,
1204 0,
1205 6,
1206 sprites.len() as u64,
1207 );
1208 *instance_offset = next_offset;
1209 true
1210 }
1211
    /// Draws video-frame surfaces, one 6-vertex quad per surface.
    ///
    /// Each `PaintSurface` carries a CoreVideo image buffer; its luma (Y) and
    /// chroma (CbCr) planes are wrapped in Metal textures via the CoreVideo
    /// texture cache and sampled by the surface fragment shader. Returns
    /// `false` when the instance buffer runs out of space for a surface's
    /// bounds record.
    fn draw_surfaces(
        &mut self,
        surfaces: &[PaintSurface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.get_width() as i32),
                DevicePixels::from(surface.image_buffer.get_height() as i32),
            );

            // Only biplanar full-range 4:2:0 YCbCr frames are supported;
            // any other pixel format is a programming error upstream.
            assert_eq!(
                surface.image_buffer.get_pixel_format(),
                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            // Plane 0 is the Y (luma) plane, exposed as a single-channel
            // (R8) texture.
            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.get_width_of_plane(0),
                    surface.image_buffer.get_height_of_plane(0),
                    0,
                )
                .unwrap();
            // Plane 1 holds interleaved Cb/Cr samples, exposed as a
            // two-channel (RG8) texture.
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.get_width_of_plane(1),
                    surface.image_buffer.get_height_of_plane(1),
                    1,
                )
                .unwrap();

            // Reserve a 256-aligned slot for this surface's bounds record.
            // NOTE(review): space is reserved with `size_of::<Surface>()` but
            // a `SurfaceBounds` is written below — confirm the sizes match.
            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            // Unwrap the CoreVideo texture wrappers into raw Metal textures
            // for the fragment stage.
            command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });
            command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });

            // Write this surface's bounds and clip mask into the reserved
            // slot (the space check above keeps the pointer in bounds).
            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
1310}
1311
1312fn new_command_encoder<'a>(
1313 command_buffer: &'a metal::CommandBufferRef,
1314 drawable: &'a metal::MetalDrawableRef,
1315 viewport_size: Size<DevicePixels>,
1316 configure_color_attachment: impl Fn(&RenderPassColorAttachmentDescriptorRef),
1317) -> &'a metal::RenderCommandEncoderRef {
1318 let render_pass_descriptor = metal::RenderPassDescriptor::new();
1319 let color_attachment = render_pass_descriptor
1320 .color_attachments()
1321 .object_at(0)
1322 .unwrap();
1323 color_attachment.set_texture(Some(drawable.texture()));
1324 color_attachment.set_store_action(metal::MTLStoreAction::Store);
1325 configure_color_attachment(color_attachment);
1326
1327 let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
1328 command_encoder.set_viewport(metal::MTLViewport {
1329 originX: 0.0,
1330 originY: 0.0,
1331 width: i32::from(viewport_size.width) as f64,
1332 height: i32::from(viewport_size.height) as f64,
1333 znear: 0.0,
1334 zfar: 1.0,
1335 });
1336 command_encoder
1337}
1338
1339fn build_pipeline_state(
1340 device: &metal::DeviceRef,
1341 library: &metal::LibraryRef,
1342 label: &str,
1343 vertex_fn_name: &str,
1344 fragment_fn_name: &str,
1345 pixel_format: metal::MTLPixelFormat,
1346) -> metal::RenderPipelineState {
1347 let vertex_fn = library
1348 .get_function(vertex_fn_name, None)
1349 .expect("error locating vertex function");
1350 let fragment_fn = library
1351 .get_function(fragment_fn_name, None)
1352 .expect("error locating fragment function");
1353
1354 let descriptor = metal::RenderPipelineDescriptor::new();
1355 descriptor.set_label(label);
1356 descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1357 descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1358 let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1359 color_attachment.set_pixel_format(pixel_format);
1360 color_attachment.set_blending_enabled(true);
1361 color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1362 color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1363 color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
1364 color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1365 color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1366 color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1367
1368 device
1369 .new_render_pipeline_state(&descriptor)
1370 .expect("could not create render pipeline state")
1371}
1372
1373fn build_path_sprite_pipeline_state(
1374 device: &metal::DeviceRef,
1375 library: &metal::LibraryRef,
1376 label: &str,
1377 vertex_fn_name: &str,
1378 fragment_fn_name: &str,
1379 pixel_format: metal::MTLPixelFormat,
1380) -> metal::RenderPipelineState {
1381 let vertex_fn = library
1382 .get_function(vertex_fn_name, None)
1383 .expect("error locating vertex function");
1384 let fragment_fn = library
1385 .get_function(fragment_fn_name, None)
1386 .expect("error locating fragment function");
1387
1388 let descriptor = metal::RenderPipelineDescriptor::new();
1389 descriptor.set_label(label);
1390 descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1391 descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1392 let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1393 color_attachment.set_pixel_format(pixel_format);
1394 color_attachment.set_blending_enabled(true);
1395 color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1396 color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1397 color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
1398 color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1399 color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1400 color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1401
1402 device
1403 .new_render_pipeline_state(&descriptor)
1404 .expect("could not create render pipeline state")
1405}
1406
1407fn build_path_rasterization_pipeline_state(
1408 device: &metal::DeviceRef,
1409 library: &metal::LibraryRef,
1410 label: &str,
1411 vertex_fn_name: &str,
1412 fragment_fn_name: &str,
1413 pixel_format: metal::MTLPixelFormat,
1414 path_sample_count: u32,
1415) -> metal::RenderPipelineState {
1416 let vertex_fn = library
1417 .get_function(vertex_fn_name, None)
1418 .expect("error locating vertex function");
1419 let fragment_fn = library
1420 .get_function(fragment_fn_name, None)
1421 .expect("error locating fragment function");
1422
1423 let descriptor = metal::RenderPipelineDescriptor::new();
1424 descriptor.set_label(label);
1425 descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1426 descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1427 if path_sample_count > 1 {
1428 descriptor.set_raster_sample_count(path_sample_count as _);
1429 descriptor.set_alpha_to_coverage_enabled(false);
1430 }
1431 let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1432 color_attachment.set_pixel_format(pixel_format);
1433 color_attachment.set_blending_enabled(true);
1434 color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1435 color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1436 color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
1437 color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1438 color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1439 color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1440
1441 device
1442 .new_render_pipeline_state(&descriptor)
1443 .expect("could not create render pipeline state")
1444}
1445
/// Rounds `offset` up to the next multiple of 256, the alignment Metal
/// expects for buffer-binding offsets.
fn align_offset(offset: &mut usize) {
    *offset = (*offset).next_multiple_of(256);
}
1450
/// Buffer-binding slots for the shadow pipeline. Values are passed as the
/// `index` argument of `set_vertex_buffer`/`set_vertex_bytes`; `#[repr(C)]`
/// keeps the discriminants stable so they can mirror the argument table in
/// the Metal shader source.
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}
1457
/// Buffer-binding slots for the quad pipeline. Values are passed as the
/// `index` argument of `set_vertex_buffer`/`set_vertex_bytes`; `#[repr(C)]`
/// keeps the discriminants stable so they can mirror the argument table in
/// the Metal shader source.
#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}
1464
/// Buffer-binding slots for the underline pipeline (see `draw_underlines`,
/// which binds `Underlines` for both the vertex and fragment stages).
/// `#[repr(C)]` keeps the discriminants stable so they can mirror the
/// argument table in the Metal shader source.
#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}
1471
/// Buffer- and texture-binding slots shared by the monochrome and
/// polychrome sprite pipelines. `AtlasTexture` is a fragment-texture slot
/// (`set_fragment_texture`); the rest are buffer/bytes slots. `#[repr(C)]`
/// keeps the discriminants stable so they can mirror the argument table in
/// the Metal shader source.
#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}
1480
/// Buffer- and texture-binding slots for the surface (video-frame)
/// pipeline. `YTexture` and `CbCrTexture` are fragment-texture slots for
/// the two YCbCr planes; the rest are buffer/bytes slots. `#[repr(C)]`
/// keeps the discriminants stable so they can mirror the argument table in
/// the Metal shader source.
#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}
1490
/// Buffer-binding slots for the path-rasterization pipeline. `#[repr(C)]`
/// keeps the discriminants stable so they can mirror the argument table in
/// the Metal shader source.
#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    ViewportSize = 1,
}
1496
/// Per-instance data for compositing a rasterized path: the screen-space
/// bounds of the quad to draw. `#[repr(C)]` so instances can be copied
/// byte-for-byte into a Metal instance buffer.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
}
1502
/// Per-instance data written into the instance buffer by `draw_surfaces`:
/// the surface's screen-space bounds plus the clip mask applied to it.
/// `#[repr(C)]` so the layout matches what the surface shader reads.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}