use super::metal_atlas::MetalAtlas;
use crate::{
    AtlasTextureId, AtlasTextureKind, AtlasTile, Background, Bounds, ContentMask, DevicePixels,
    MonochromeSprite, PaintSurface, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch,
    Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline, point, size,
};
use anyhow::{Context as _, Result};
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::{NSSize, NSUInteger},
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use core_video::{
    metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
    pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
};
use foreign_types::{ForeignType, ForeignTypeRef};
use metal::{CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to Metal shader code.
pub(crate) type PointF = crate::Point<f32>;

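// Depending on the `runtime_shaders` feature, shaders are either compiled at
// startup from the stitched source or loaded from the precompiled metallib
// produced by the build script.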
#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA for path rasterization; all devices support it.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;

pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    _transparent: bool,
) -> Renderer {
    MetalRenderer::new(context)
}

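// Pools instance buffers so their allocations can be reused across frames. All
// pooled buffers share a single size; `draw` doubles that size and resets the
// pool when a scene overflows the current buffer.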
pub(crate) struct InstanceBufferPool {
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}

impl Default for InstanceBufferPool {
    fn default() -> Self {
        Self {
            buffer_size: 2 * 1024 * 1024,
            buffers: Vec::new(),
        }
    }
}

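// A buffer checked out of the pool, tagged with the pool's size at acquisition
// time so stale buffers can be dropped instead of re-pooled after a reset.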
pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    size: usize,
}

impl InstanceBufferPool {
    pub(crate) fn reset(&mut self, buffer_size: usize) {
        self.buffer_size = buffer_size;
        self.buffers.clear();
    }

    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
        let buffer = self.buffers.pop().unwrap_or_else(|| {
            device.new_buffer(
                self.buffer_size as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        InstanceBuffer {
            metal_buffer: buffer,
            size: self.buffer_size,
        }
    }

    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
        if buffer.size == self.buffer_size {
            self.buffers.push(buffer.metal_buffer)
        }
    }
}

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: core_video::metal_texture_cache::CVMetalTextureCache,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>) -> Self {
        // Prefer low-power integrated GPUs on Intel Macs. On Apple
        // Silicon there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let mut devices = metal::Device::all();
        devices.sort_by_key(|device| (device.is_removable(), device.is_low_power()));
        let Some(device) = devices.pop() else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(false);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

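        // Pack a point into the bit layout Metal expects for a `float2`:
        // x in the low 32 bits, y in the high 32 bits.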
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

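        // Two triangles covering the unit square; instanced draws stretch
        // these six vertices over each primitive's bounds.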
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone(), PATH_SAMPLE_COUNT));
        let core_video_texture_cache =
            CVMetalTextureCache::new(None, device.clone(), None).unwrap();

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let size = NSSize {
            width: size.width.0 as f64,
            height: size.height.0 as f64,
        };
        unsafe {
            let _: () = msg_send![
                self.layer(),
                setDrawableSize: size
            ];
        }
    }

    pub fn update_transparency(&self, _transparent: bool) {
        // todo(mac)?
    }

    pub fn destroy(&self) {
        // nothing to do
    }

    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

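        // Each attempt encodes the whole scene into a single pooled instance
        // buffer; on overflow, the pool is reset with a doubled buffer size
        // (capped at 256 MiB) and the frame is encoded again from scratch.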
        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

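                    // With presents-with-transaction, the drawable must be
                    // presented explicitly after the command buffer has been
                    // scheduled; otherwise Metal presents it as part of the
                    // commit.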
                    if self.presents_with_transaction {
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }

    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let path_tiles = self
            .rasterize_paths(
                scene.paths(),
                instance_buffer,
                &mut instance_offset,
                command_buffer,
            )
            .with_context(|| format!("rasterizing {} paths", scene.paths().len()))?;

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });

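        // The scene hands back primitives pre-batched by kind; each batch is
        // encoded with its own pipeline. A draw helper returns false when the
        // instance buffer can't hold the batch, which aborts this frame.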
        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                command_encoder.end_encoding();
                anyhow::bail!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
            }
        }

        command_encoder.end_encoding();

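        // The instance buffer uses managed storage, so CPU-side writes must be
        // flushed to the GPU explicitly before the command buffer executes.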
        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }

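    // Rasterizes each path into a tile of the path atlas ahead of the main
    // render pass. Returns None when a tile or the instance buffer can't be
    // allocated; the caller reports this as an error and the frame is retried.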
    fn rasterize_paths(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path)?;
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > instance_buffer.size {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            let msaa_texture = self.sprite_atlas.msaa_texture(texture_id);

            if let Some(msaa_texture) = msaa_texture {
                color_attachment.set_texture(Some(&msaa_texture));
                color_attachment.set_resolve_texture(Some(&texture));
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
            } else {
                color_attachment.set_texture(Some(&texture));
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_store_action(metal::MTLStoreAction::Store);
            }
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));

            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let buffer_contents = unsafe {
                (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
            };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }

    fn draw_shadows(
        &self,
        shadows: &[Shadow],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        // Check for overflow before deriving a pointer into the buffer, so the
        // pointer arithmetic below never leaves the allocation.
        let shadow_bytes_len = mem::size_of_val(shadows);
        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &self,
        quads: &[Quad],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        // As in draw_shadows, verify the batch fits before touching the buffer.
        let quad_bytes_len = mem::size_of_val(quads);
        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_paths(
        &self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

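        // Accumulate runs of paths whose tiles live in the same atlas texture,
        // then flush each run as one instanced draw when the texture changes
        // or the paths are exhausted.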
        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > instance_buffer.size {
                    return false;
                }

                let buffer_contents = unsafe {
                    (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
                };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }

    fn draw_underlines(
        &self,
        underlines: &[Underline],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        // Verify the batch fits before deriving a pointer into the buffer.
        let underline_bytes_len = mem::size_of_val(underlines);
        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        // Verify the batch fits before deriving a pointer into the buffer.
        let sprite_bytes_len = mem::size_of_val(sprites);
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        // Verify the batch fits before deriving a pointer into the buffer.
        let sprite_bytes_len = mem::size_of_val(sprites);
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_surfaces(
        &mut self,
        surfaces: &[PaintSurface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.get_width() as i32),
                DevicePixels::from(surface.image_buffer.get_height() as i32),
            );

            assert_eq!(
                surface.image_buffer.get_pixel_format(),
                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

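            // The pixel buffer is biplanar YCbCr: plane 0 carries luma (Y) as
            // R8, plane 1 carries interleaved chroma (CbCr) as RG8. Metal
            // textures are created over each plane via the CoreVideo texture
            // cache, and the fragment shader is expected to recombine them
            // into RGB.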
            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.get_width_of_plane(0),
                    surface.image_buffer.get_height_of_plane(0),
                    0,
                )
                .unwrap();
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.get_width_of_plane(1),
                    surface.image_buffer.get_height_of_plane(1),
                    1,
                )
                .unwrap();

            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });
            command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });

            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
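    // Straight (non-premultiplied) alpha blending for color, while destination
    // alpha accumulates additively.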
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
    path_sample_count: u32,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    if path_sample_count > 1 {
        descriptor.set_raster_sample_count(path_sample_count as _);
        descriptor.set_alpha_to_coverage_enabled(true);
    }
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
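    // One/One additive blending lets overlapping path triangles accumulate
    // coverage in the R16Float atlas tile.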
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Align to multiples of 256 to make Metal happy: buffer offsets bound via
// setVertexBuffer/setFragmentBuffer must be 256-byte aligned.
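// e.g. 0 stays 0, 1..=256 round up to 256, and 257..=512 round up to 512.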
fn align_offset(offset: &mut usize) {
    *offset = (*offset).div_ceil(256) * 256;
}

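// Argument-table indices shared with the shader source; each variant is
// expected to match the corresponding [[buffer(n)]] / [[texture(n)]] binding.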
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Background,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}
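
// A minimal sketch (not part of the original source) exercising the
// InstanceBufferPool recycling contract described above. It assumes a Metal
// device is available via `metal::Device::system_default()` and simply skips
// otherwise (e.g. on CI machines without a GPU).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn instance_buffer_pool_recycles_only_matching_sizes() {
        let Some(device) = metal::Device::system_default() else {
            return;
        };
        let mut pool = InstanceBufferPool::default();

        // A released buffer whose size still matches the pool's is recycled.
        let buffer = pool.acquire(&device);
        assert_eq!(buffer.size, 2 * 1024 * 1024);
        pool.release(buffer);
        assert_eq!(pool.buffers.len(), 1);

        // After a reset (as `draw` does on overflow), a stale buffer is
        // dropped on release rather than re-pooled.
        let stale = pool.acquire(&device);
        pool.reset(4 * 1024 * 1024);
        pool.release(stale);
        assert!(pool.buffers.is_empty());
    }
}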