use super::metal_atlas::MetalAtlas;
use crate::{
    AtlasTextureId, Background, Bounds, ContentMask, DevicePixels, MonochromeSprite, PaintSurface,
    Path, Point, PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size,
    Surface, Underline, point, size,
};
use anyhow::Result;
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::{NSSize, NSUInteger},
    quartzcore::AutoresizingMask,
};
#[cfg(any(test, feature = "test-support"))]
use image::RgbaImage;

use core_foundation::base::TCFType;
use core_video::{
    metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
    pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
};
use foreign_types::{ForeignType, ForeignTypeRef};
use metal::{
    CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange,
    RenderPassColorAttachmentDescriptorRef,
};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;

use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to Metal
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA, which all devices support.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;

pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    transparent: bool,
) -> Renderer {
    MetalRenderer::new(context, transparent)
}

pub(crate) struct InstanceBufferPool {
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}

impl Default for InstanceBufferPool {
    fn default() -> Self {
        Self {
            buffer_size: 2 * 1024 * 1024,
            buffers: Vec::new(),
        }
    }
}

pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    size: usize,
}

impl InstanceBufferPool {
    pub(crate) fn reset(&mut self, buffer_size: usize) {
        self.buffer_size = buffer_size;
        self.buffers.clear();
    }

    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
        let buffer = self.buffers.pop().unwrap_or_else(|| {
            device.new_buffer(
                self.buffer_size as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        InstanceBuffer {
            metal_buffer: buffer,
            size: self.buffer_size,
        }
    }

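    /// Returns a buffer to the pool. Note that a buffer whose size no longer
    /// matches `buffer_size` (e.g. after a `reset` to a larger size) is
    /// dropped rather than reused.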
    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
        if buffer.size == self.buffer_size {
            self.buffers.push(buffer.metal_buffer)
        }
    }
}

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: core_video::metal_texture_cache::CVMetalTextureCache,
    path_intermediate_texture: Option<metal::Texture>,
    path_intermediate_msaa_texture: Option<metal::Texture>,
    path_sample_count: u32,
}

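/// Vertex layout consumed by the path rasterization shaders; `#[repr(C)]`
/// keeps the field order and padding predictable so it matches the layout
/// the shader expects.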
#[repr(C)]
pub struct PathRasterizationVertex {
    pub xy_position: Point<ScaledPixels>,
    pub st_position: Point<f32>,
    pub color: Background,
    pub bounds: Bounds<ScaledPixels>,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>, transparent: bool) -> Self {
        // Prefer low-power integrated GPUs on Intel Macs. On Apple
        // Silicon, there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let device = if let Some(d) = metal::Device::all()
            .into_iter()
            .min_by_key(|d| (d.is_removable(), !d.is_low_power()))
        {
            d
        } else {
            // For some reason `all()` can return an empty list, see https://github.com/zed-industries/zed/issues/37689
            // In that case, we fall back to the system default device.
            log::error!(
                "Unable to enumerate Metal devices; attempting to use system default device"
            );
            metal::Device::system_default().unwrap_or_else(|| {
                log::error!("unable to access a compatible graphics device");
                std::process::exit(1);
            })
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        // Support direct-to-display rendering if the window is not transparent
        // https://developer.apple.com/documentation/metal/managing-your-game-window-for-metal-in-macos
        layer.set_opaque(!transparent);
        layer.set_maximum_drawable_count(3);
        // Allow texture reading for visual tests (captures screenshots without ScreenCaptureKit)
        #[cfg(any(test, feature = "test-support"))]
        layer.set_framebuffer_only(false);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

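        // Packs a point's two f32 coordinates into a u64 so the vertex data
        // below can be uploaded as Metal `float2` values. Illustrative
        // example: to_float2_bits(point(1.0, 2.0)) == 0x4000_0000_3F80_0000,
        // i.e. y (2.0 => 0x40000000) in the high 32 bits and x (1.0 =>
        // 0x3F800000) in the low 32 bits, matching float2's little-endian
        // memory layout.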
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
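        // These six vertices form two triangles that cover the unit square;
        // the instanced draw calls elsewhere in this file reuse this quad,
        // scaling and translating it per instance in the vertex shaders.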
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::BGRA8Unorm,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_path_sprite_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            CVMetalTextureCache::new(None, device.clone(), None).unwrap();

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
            path_intermediate_texture: None,
            path_intermediate_msaa_texture: None,
            path_sample_count: PATH_SAMPLE_COUNT,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let size = NSSize {
            width: size.width.0 as f64,
            height: size.height.0 as f64,
        };
        unsafe {
            let _: () = msg_send![
                self.layer(),
                setDrawableSize: size
            ];
        }
        let device_pixels_size = Size {
            width: DevicePixels(size.width as i32),
            height: DevicePixels(size.height as i32),
        };
        self.update_path_intermediate_textures(device_pixels_size);
    }

    fn update_path_intermediate_textures(&mut self, size: Size<DevicePixels>) {
        // It's unclear exactly when this happens, but the size can sometimes
        // be zero here, most likely before the layout pass on window creation.
        // Creating a zero-sized texture causes a SIGABRT.
        // https://github.com/zed-industries/zed/issues/36229
        if size.width.0 <= 0 || size.height.0 <= 0 {
            self.path_intermediate_texture = None;
            self.path_intermediate_msaa_texture = None;
            return;
        }

        let texture_descriptor = metal::TextureDescriptor::new();
        texture_descriptor.set_width(size.width.0 as u64);
        texture_descriptor.set_height(size.height.0 as u64);
        texture_descriptor.set_pixel_format(metal::MTLPixelFormat::BGRA8Unorm);
        texture_descriptor
            .set_usage(metal::MTLTextureUsage::RenderTarget | metal::MTLTextureUsage::ShaderRead);
        self.path_intermediate_texture = Some(self.device.new_texture(&texture_descriptor));

        if self.path_sample_count > 1 {
            let msaa_descriptor = texture_descriptor;
            msaa_descriptor.set_texture_type(metal::MTLTextureType::D2Multisample);
            msaa_descriptor.set_storage_mode(metal::MTLStorageMode::Private);
            msaa_descriptor.set_sample_count(self.path_sample_count as _);
            self.path_intermediate_msaa_texture = Some(self.device.new_texture(&msaa_descriptor));
        } else {
            self.path_intermediate_msaa_texture = None;
        }
    }

    pub fn update_transparency(&self, transparent: bool) {
        self.layer.set_opaque(!transparent);
    }

    pub fn destroy(&self) {
        // nothing to do
    }

    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
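                    // Double the buffer size (2 MiB initially, capped at
                    // 256 MiB above) and retry the frame.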
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }

    /// Renders the scene to a texture and returns the pixel data as an RGBA image.
    /// This does not present the frame to the screen, which is useful for visual
    /// testing where we want to capture what would be rendered without displaying it.
    #[cfg(any(test, feature = "test-support"))]
    pub fn render_to_image(&mut self, scene: &Scene) -> Result<RgbaImage> {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = layer
            .next_drawable()
            .ok_or_else(|| anyhow::anyhow!("Failed to get drawable for render_to_image"))?;

        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    // Commit and wait for completion without presenting
                    command_buffer.commit();
                    command_buffer.wait_until_completed();

                    // Read pixels from the texture
                    let texture = drawable.texture();
                    let width = texture.width() as u32;
                    let height = texture.height() as u32;
                    let bytes_per_row = width as usize * 4;
                    let buffer_size = height as usize * bytes_per_row;

                    let mut pixels = vec![0u8; buffer_size];

                    let region = metal::MTLRegion {
                        origin: metal::MTLOrigin { x: 0, y: 0, z: 0 },
                        size: metal::MTLSize {
                            width: width as u64,
                            height: height as u64,
                            depth: 1,
                        },
                    };

                    texture.get_bytes(
                        pixels.as_mut_ptr() as *mut std::ffi::c_void,
                        bytes_per_row as u64,
                        region,
                        0,
                    );

                    // Convert BGRA to RGBA (swap B and R channels)
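                    // e.g. a pixel stored as [B, G, R, A] becomes [R, G, B, A].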
                    for chunk in pixels.chunks_exact_mut(4) {
                        chunk.swap(0, 2);
                    }

                    return RgbaImage::from_raw(width, height, pixels).ok_or_else(|| {
                        anyhow::anyhow!("Failed to create RgbaImage from pixel data")
                    });
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        anyhow::bail!("instance buffer size grew too large: {}", buffer_size);
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }

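    /// Encodes all of the scene's batches into a single command buffer.
    /// Returns an error if the scene's instances do not fit in
    /// `instance_buffer`; the callers then grow the buffer and retry.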
    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        let mut instance_offset = 0;

        let mut command_encoder = new_command_encoder(
            command_buffer,
            drawable,
            viewport_size,
            |color_attachment| {
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
            },
        );

        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => {
                    command_encoder.end_encoding();

                    let did_draw = self.draw_paths_to_intermediate(
                        paths,
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_buffer,
                    );

                    command_encoder = new_command_encoder(
                        command_buffer,
                        drawable,
                        viewport_size,
                        |color_attachment| {
                            color_attachment.set_load_action(metal::MTLLoadAction::Load);
                        },
                    );

                    if did_draw {
                        self.draw_paths_from_intermediate(
                            paths,
                            instance_buffer,
                            &mut instance_offset,
                            viewport_size,
                            command_encoder,
                        )
                    } else {
                        false
                    }
                }
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::SubpixelSprites { .. } => unreachable!(),
            };
            if !ok {
                command_encoder.end_encoding();
                anyhow::bail!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
            }
        }

        command_encoder.end_encoding();

        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }

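    /// Rasterizes `paths` into the intermediate texture, resolving MSAA when
    /// enabled; `draw_paths_from_intermediate` then composites the result
    /// onto the drawable.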
    fn draw_paths_to_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_buffer: &metal::CommandBufferRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }
        let Some(intermediate_texture) = &self.path_intermediate_texture else {
            return false;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 0.));

        if let Some(msaa_texture) = &self.path_intermediate_msaa_texture {
            color_attachment.set_texture(Some(msaa_texture));
            color_attachment.set_resolve_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
        } else {
            color_attachment.set_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
        }

        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
        command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);

        align_offset(instance_offset);
        let mut vertices = Vec::new();
        for path in paths {
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds: path.bounds.intersect(&path.content_mask.bounds),
            }));
        }
        let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
        let next_offset = *instance_offset + vertices_bytes_len;
        if next_offset > instance_buffer.size {
            command_encoder.end_encoding();
            return false;
        }
        command_encoder.set_vertex_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            PathRasterizationInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                vertices.as_ptr() as *const u8,
                buffer_contents,
                vertices_bytes_len,
            );
        }
        command_encoder.draw_primitives(
            metal::MTLPrimitiveType::Triangle,
            0,
            vertices.len() as u64,
        );
        *instance_offset = next_offset;

        command_encoder.end_encoding();
        true
    }

    fn draw_shadows(
        &self,
        shadows: &[Shadow],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &self,
        quads: &[Quad],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        let Some(first_path) = paths.first() else {
            return true;
        };

        let Some(ref intermediate_texture) = self.path_intermediate_texture else {
            return false;
        };

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        command_encoder.set_fragment_texture(
            SpriteInputIndex::AtlasTexture as u64,
            Some(intermediate_texture),
        );

        // When copying paths from the intermediate texture to the drawable,
        // each pixel must only be copied once, in case of transparent paths.
        //
        // If all paths have the same draw order, their bounds are all
        // disjoint, so we can copy each path's bounds individually. If this
        // batch combines different draw orders, we instead perform a single
        // copy covering their minimal spanning rect.
        let sprites = if paths.last().unwrap().order == first_path.order {
            paths
                .iter()
                .map(|path| PathSprite {
                    bounds: path.clipped_bounds(),
                })
                .collect()
        } else {
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            vec![PathSprite { bounds }]
        };

        align_offset(instance_offset);
        let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;

        true
    }

    fn draw_underlines(
        &self,
        underlines: &[Underline],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_surfaces(
        &mut self,
        surfaces: &[PaintSurface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.get_width() as i32),
                DevicePixels::from(surface.image_buffer.get_height() as i32),
            );

            assert_eq!(
                surface.image_buffer.get_pixel_format(),
                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.get_width_of_plane(0),
                    surface.image_buffer.get_height_of_plane(0),
                    0,
                )
                .unwrap();
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.get_width_of_plane(1),
                    surface.image_buffer.get_height_of_plane(1),
                    1,
                )
                .unwrap();

            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });
            command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });

            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

fn new_command_encoder<'a>(
    command_buffer: &'a metal::CommandBufferRef,
    drawable: &'a metal::MetalDrawableRef,
    viewport_size: Size<DevicePixels>,
    configure_color_attachment: impl Fn(&RenderPassColorAttachmentDescriptorRef),
) -> &'a metal::RenderCommandEncoderRef {
    let render_pass_descriptor = metal::RenderPassDescriptor::new();
    let color_attachment = render_pass_descriptor
        .color_attachments()
        .object_at(0)
        .unwrap();
    color_attachment.set_texture(Some(drawable.texture()));
    color_attachment.set_store_action(metal::MTLStoreAction::Store);
    configure_color_attachment(color_attachment);

    let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
    command_encoder.set_viewport(metal::MTLViewport {
        originX: 0.0,
        originY: 0.0,
        width: i32::from(viewport_size.width) as f64,
        height: i32::from(viewport_size.height) as f64,
        znear: 0.0,
        zfar: 1.0,
    });
    command_encoder
}

fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_sprite_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
    path_sample_count: u32,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    if path_sample_count > 1 {
        descriptor.set_raster_sample_count(path_sample_count as _);
        descriptor.set_alpha_to_coverage_enabled(false);
    }
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Align to multiples of 256 to keep Metal happy.
fn align_offset(offset: &mut usize) {
    *offset = (*offset).div_ceil(256) * 256;
}
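
// A minimal illustrative check of the alignment behavior; the module and
// test names below are hypothetical additions, not part of the renderer API.
#[cfg(test)]
mod align_offset_example {
    use super::align_offset;

    #[test]
    fn rounds_offsets_up_to_multiples_of_256() {
        // 0 stays 0; everything else rounds up to the next multiple of 256.
        for (input, expected) in [(0usize, 0usize), (1, 256), (256, 256), (300, 512)] {
            let mut offset = input;
            align_offset(&mut offset);
            assert_eq!(offset, expected);
        }
    }
}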
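// The following enums name the argument-table indices shared with the Metal
// shaders; their discriminants must stay in sync with the shader source.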
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    ViewportSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}