use super::metal_atlas::MetalAtlas;
use crate::{
    AtlasTextureId, Background, Bounds, ContentMask, DevicePixels, MonochromeSprite, PaintSurface,
    Path, Point, PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size,
    Surface, Underline, point, size,
};
use anyhow::Result;
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::{NSSize, NSUInteger},
    quartzcore::AutoresizingMask,
};

use core_foundation::base::TCFType;
use core_video::{
    metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
    pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
};
use foreign_types::{ForeignType, ForeignTypeRef};
use metal::{
    CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange,
    RenderPassColorAttachmentDescriptorRef,
};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;

use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to Metal.
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA; all devices support it.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;

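/// Creates the macOS renderer. The native window/view, bounds, and
/// transparency parameters are unused on this platform.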
pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    _transparent: bool,
) -> Renderer {
    MetalRenderer::new(context)
}

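/// A pool of instance buffers that all share a single, uniform size.
/// `acquire` reuses a pooled buffer or allocates a new one; `release` returns
/// a buffer to the pool only if it still matches the current size.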
pub(crate) struct InstanceBufferPool {
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}

impl Default for InstanceBufferPool {
    fn default() -> Self {
        Self {
            buffer_size: 2 * 1024 * 1024,
            buffers: Vec::new(),
        }
    }
}

pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    size: usize,
}

impl InstanceBufferPool {
    pub(crate) fn reset(&mut self, buffer_size: usize) {
        self.buffer_size = buffer_size;
        self.buffers.clear();
    }

    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
        let buffer = self.buffers.pop().unwrap_or_else(|| {
            device.new_buffer(
                self.buffer_size as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        InstanceBuffer {
            metal_buffer: buffer,
            size: self.buffer_size,
        }
    }

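    // A buffer acquired before a `reset` has a stale size; let it drop
    // instead of returning it to the pool.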
    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
        if buffer.size == self.buffer_size {
            self.buffers.push(buffer.metal_buffer)
        }
    }
}

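/// Draws `Scene`s into a `CAMetalLayer`, using one render pipeline state per
/// primitive kind plus an intermediate (optionally multisampled) texture for
/// path rendering.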
pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: core_video::metal_texture_cache::CVMetalTextureCache,
    path_intermediate_texture: Option<metal::Texture>,
    path_intermediate_msaa_texture: Option<metal::Texture>,
    path_sample_count: u32,
}

#[repr(C)]
pub struct PathRasterizationVertex {
    pub xy_position: Point<ScaledPixels>,
    pub st_position: Point<f32>,
    pub color: Background,
    pub bounds: Bounds<ScaledPixels>,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>) -> Self {
        // Prefer low-power integrated GPUs on Intel Macs. On Apple
        // Silicon, there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let device = if let Some(d) = metal::Device::all()
            .into_iter()
            .min_by_key(|d| (d.is_removable(), !d.is_low_power()))
        {
            d
        } else {
            // For some reason `all()` can return an empty list; see https://github.com/zed-industries/zed/issues/37689
            // In that case, we fall back to the system default device.
            log::error!(
                "Unable to enumerate Metal devices; attempting to use system default device"
            );
            metal::Device::system_default().unwrap_or_else(|| {
                log::error!("unable to access a compatible graphics device");
                std::process::exit(1);
            })
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(false);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

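        // Pack a float2 as raw IEEE-754 bits: x in the low 32 bits, y in the
        // high 32, matching Metal's float2 layout on little-endian hardware.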
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

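        // Two triangles covering the unit square; each instanced draw maps
        // these onto the bounds of the instance being drawn.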
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::BGRA8Unorm,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_path_sprite_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            CVMetalTextureCache::new(None, device.clone(), None).unwrap();

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
            path_intermediate_texture: None,
            path_intermediate_msaa_texture: None,
            path_sample_count: PATH_SAMPLE_COUNT,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let size = NSSize {
            width: size.width.0 as f64,
            height: size.height.0 as f64,
        };
        unsafe {
            let _: () = msg_send![
                self.layer(),
                setDrawableSize: size
            ];
        }
        let device_pixels_size = Size {
            width: DevicePixels(size.width as i32),
            height: DevicePixels(size.height as i32),
        };
        self.update_path_intermediate_textures(device_pixels_size);
    }

    fn update_path_intermediate_textures(&mut self, size: Size<DevicePixels>) {
        // We are uncertain when this happens, but sometimes the size can be zero here,
        // most likely before the layout pass on window creation. Creating a zero-sized
        // texture causes a SIGABRT.
        // https://github.com/zed-industries/zed/issues/36229
        if size.width.0 <= 0 || size.height.0 <= 0 {
            self.path_intermediate_texture = None;
            self.path_intermediate_msaa_texture = None;
            return;
        }

        let texture_descriptor = metal::TextureDescriptor::new();
        texture_descriptor.set_width(size.width.0 as u64);
        texture_descriptor.set_height(size.height.0 as u64);
        texture_descriptor.set_pixel_format(metal::MTLPixelFormat::BGRA8Unorm);
        texture_descriptor
            .set_usage(metal::MTLTextureUsage::RenderTarget | metal::MTLTextureUsage::ShaderRead);
        self.path_intermediate_texture = Some(self.device.new_texture(&texture_descriptor));

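        // With MSAA enabled, paths are rasterized into a multisampled target
        // and resolved into the intermediate texture
        // (see `draw_paths_to_intermediate`).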
        if self.path_sample_count > 1 {
            let msaa_descriptor = texture_descriptor;
            msaa_descriptor.set_texture_type(metal::MTLTextureType::D2Multisample);
            msaa_descriptor.set_storage_mode(metal::MTLStorageMode::Private);
            msaa_descriptor.set_sample_count(self.path_sample_count as _);
            self.path_intermediate_msaa_texture = Some(self.device.new_texture(&msaa_descriptor));
        } else {
            self.path_intermediate_msaa_texture = None;
        }
    }

    pub fn update_transparency(&self, _transparent: bool) {
        // todo(mac)?
    }

    pub fn destroy(&self) {
        // nothing to do
    }

    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

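        // Encode the scene; if it doesn't fit in the instance buffer, double
        // the buffer size (up to a 256 MiB cap) and retry.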
        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }

    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        let mut instance_offset = 0;

        let mut command_encoder = new_command_encoder(
            command_buffer,
            drawable,
            viewport_size,
            |color_attachment| {
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
            },
        );

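        // Paths take two passes: end the current encoder, rasterize coverage
        // into the intermediate texture, then start a new encoder (loading the
        // existing drawable contents) to composite that texture. All other
        // primitives encode directly onto the drawable.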
        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => {
                    command_encoder.end_encoding();

                    let did_draw = self.draw_paths_to_intermediate(
                        paths,
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_buffer,
                    );

                    command_encoder = new_command_encoder(
                        command_buffer,
                        drawable,
                        viewport_size,
                        |color_attachment| {
                            color_attachment.set_load_action(metal::MTLLoadAction::Load);
                        },
                    );

                    if did_draw {
                        self.draw_paths_from_intermediate(
                            paths,
                            instance_buffer,
                            &mut instance_offset,
                            viewport_size,
                            command_encoder,
                        )
                    } else {
                        false
                    }
                }
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };
            if !ok {
                command_encoder.end_encoding();
                anyhow::bail!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
            }
        }

        command_encoder.end_encoding();

        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }

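    /// Rasterizes path coverage into the intermediate (optionally MSAA)
    /// texture. Returns `false` if the intermediate texture is missing or the
    /// vertices don't fit in the instance buffer.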
    fn draw_paths_to_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_buffer: &metal::CommandBufferRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }
        let Some(intermediate_texture) = &self.path_intermediate_texture else {
            return false;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 0.));

        if let Some(msaa_texture) = &self.path_intermediate_msaa_texture {
            color_attachment.set_texture(Some(msaa_texture));
            color_attachment.set_resolve_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
        } else {
            color_attachment.set_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
        }

        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
        command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);

        align_offset(instance_offset);
        let mut vertices = Vec::new();
        for path in paths {
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds: path.bounds.intersect(&path.content_mask.bounds),
            }));
        }
        let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
        let next_offset = *instance_offset + vertices_bytes_len;
        if next_offset > instance_buffer.size {
            command_encoder.end_encoding();
            return false;
        }
        command_encoder.set_vertex_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            PathRasterizationInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                vertices.as_ptr() as *const u8,
                buffer_contents,
                vertices_bytes_len,
            );
        }
        command_encoder.draw_primitives(
            metal::MTLPrimitiveType::Triangle,
            0,
            vertices.len() as u64,
        );
        *instance_offset = next_offset;

        command_encoder.end_encoding();
        true
    }

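    // The instanced draws below share one pattern: align the instance offset,
    // bind the shared instance buffer at that offset, copy the primitives in,
    // then draw 6 unit-quad vertices per instance.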
    fn draw_shadows(
        &self,
        shadows: &[Shadow],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &self,
        quads: &[Quad],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        let Some(first_path) = paths.first() else {
            return true;
        };

        let Some(ref intermediate_texture) = self.path_intermediate_texture else {
            return false;
        };

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        command_encoder.set_fragment_texture(
            SpriteInputIndex::AtlasTexture as u64,
            Some(intermediate_texture),
        );

        // When copying paths from the intermediate texture to the drawable,
        // each pixel must be copied only once, in case of transparent paths.
        //
        // If all paths have the same draw order, their bounds are all
        // disjoint, so we can copy each path's bounds individually. If this
        // batch combines different draw orders, we perform a single copy
        // over a minimal spanning rect.
        let sprites = if paths.last().unwrap().order == first_path.order {
            paths
                .iter()
                .map(|path| PathSprite {
                    bounds: path.clipped_bounds(),
                })
                .collect()
        } else {
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            vec![PathSprite { bounds }]
        };

        align_offset(instance_offset);
        let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;

        true
    }

    fn draw_underlines(
        &self,
        underlines: &[Underline],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_surfaces(
        &mut self,
        surfaces: &[PaintSurface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.get_width() as i32),
                DevicePixels::from(surface.image_buffer.get_height() as i32),
            );

            assert_eq!(
                surface.image_buffer.get_pixel_format(),
                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

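            // The buffer is biplanar YCbCr: plane 0 is luma (Y), wrapped as an
            // R8 texture, and plane 1 is interleaved chroma (CbCr), wrapped as
            // RG8. The texture cache wraps each plane without copying pixels.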
            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.get_width_of_plane(0),
                    surface.image_buffer.get_height_of_plane(0),
                    0,
                )
                .unwrap();
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.get_width_of_plane(1),
                    surface.image_buffer.get_height_of_plane(1),
                    1,
                )
                .unwrap();

            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });
            command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });

            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

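/// Creates a render command encoder targeting the drawable's texture, letting
/// the caller configure the load/clear behavior, with a viewport covering the
/// full drawable.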
fn new_command_encoder<'a>(
    command_buffer: &'a metal::CommandBufferRef,
    drawable: &'a metal::MetalDrawableRef,
    viewport_size: Size<DevicePixels>,
    configure_color_attachment: impl Fn(&RenderPassColorAttachmentDescriptorRef),
) -> &'a metal::RenderCommandEncoderRef {
    let render_pass_descriptor = metal::RenderPassDescriptor::new();
    let color_attachment = render_pass_descriptor
        .color_attachments()
        .object_at(0)
        .unwrap();
    color_attachment.set_texture(Some(drawable.texture()));
    color_attachment.set_store_action(metal::MTLStoreAction::Store);
    configure_color_attachment(color_attachment);

    let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
    command_encoder.set_viewport(metal::MTLViewport {
        originX: 0.0,
        originY: 0.0,
        width: i32::from(viewport_size.width) as f64,
        height: i32::from(viewport_size.height) as f64,
        znear: 0.0,
        zfar: 1.0,
    });
    command_encoder
}

fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

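/// Identical to `build_pipeline_state` except the source RGB blend factor is
/// `One` rather than `SourceAlpha`, i.e. the intermediate path texture is
/// treated as already premultiplied by alpha when composited.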
fn build_path_sprite_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
    path_sample_count: u32,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    if path_sample_count > 1 {
        descriptor.set_raster_sample_count(path_sample_count as _);
        descriptor.set_alpha_to_coverage_enabled(false);
    }
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Align to multiples of 256 to make Metal happy.
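// On macOS, Metal requires 256-byte alignment for buffer offsets bound into
// the constant address space; e.g. an offset of 513 is rounded up to 768.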
fn align_offset(offset: &mut usize) {
    *offset = (*offset).div_ceil(256) * 256;
}

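// These argument-table indices must stay in sync with the corresponding
// buffer/texture indices used by the Metal shader source.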
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    ViewportSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}