use super::metal_atlas::MetalAtlas;
use crate::{
    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Background, Bounds, ContentMask,
    DevicePixels, MonochromeSprite, PaintSurface, Path, PathId, PathVertex, PolychromeSprite,
    PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline,
};
use anyhow::{anyhow, Result};
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::{NSSize, NSUInteger},
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use core_video::{
    metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
    pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
};
use foreign_types::{ForeignType, ForeignTypeRef};
use metal::{CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to Metal shader code
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA; all devices support it.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

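// Aliases consumed by platform-independent code; presumably other platform
// backends export their own `Context`/`Renderer` pairs under the same names.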
pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;

pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    _transparent: bool,
) -> Renderer {
    MetalRenderer::new(context)
}

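/// A pool of Metal instance buffers shared between renderers via `Context`.
/// Buffers are recycled across frames; after the pool is `reset` to a new
/// `buffer_size`, buffers of the old size are dropped on release rather than
/// returned to the pool.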
pub(crate) struct InstanceBufferPool {
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}

impl Default for InstanceBufferPool {
    fn default() -> Self {
        Self {
            buffer_size: 2 * 1024 * 1024,
            buffers: Vec::new(),
        }
    }
}

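/// A buffer checked out of the pool, paired with the pool's `buffer_size` at
/// the time of acquisition so `release` can tell whether it is still reusable.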
pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    size: usize,
}

impl InstanceBufferPool {
    pub(crate) fn reset(&mut self, buffer_size: usize) {
        self.buffer_size = buffer_size;
        self.buffers.clear();
    }

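    /// Pops a recycled buffer if one is available, or allocates a new
    /// `buffer_size`-byte buffer in managed storage.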
    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
        let buffer = self.buffers.pop().unwrap_or_else(|| {
            device.new_buffer(
                self.buffer_size as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        InstanceBuffer {
            metal_buffer: buffer,
            size: self.buffer_size,
        }
    }

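    /// Returns a buffer to the pool. Buffers acquired before a `reset` no
    /// longer match the pool's `buffer_size` and are simply dropped.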
    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
        if buffer.size == self.buffer_size {
            self.buffers.push(buffer.metal_buffer)
        }
    }
}

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: core_video::metal_texture_cache::CVMetalTextureCache,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>) -> Self {
        // Prefer low-power integrated GPUs on Intel Macs. On Apple
        // Silicon, there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let mut devices = metal::Device::all();
        devices.sort_by_key(|device| (device.is_removable(), device.is_low_power()));
        let Some(device) = devices.pop() else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(false);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

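        // Pack a point into the bit layout of a Metal `float2`: x in the low
        // 32 bits, y in the high 32 bits, so each u64 below matches the
        // in-memory layout of a shader `float2` on little-endian targets.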
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone(), PATH_SAMPLE_COUNT));
        let core_video_texture_cache =
            CVMetalTextureCache::new(None, device.clone(), None).unwrap();

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let size = NSSize {
            width: size.width.0 as f64,
            height: size.height.0 as f64,
        };
        unsafe {
            let _: () = msg_send![self.layer(), setDrawableSize: size];
        }
    }

    pub fn update_transparency(&self, _transparent: bool) {
        // todo(mac)?
    }

    pub fn destroy(&self) {
        // nothing to do
    }

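    /// Draws a scene into the layer's next drawable, retrying with a doubled
    /// instance buffer whenever the scene overflows the current buffer size,
    /// up to a 256 MiB cap.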
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

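        // Retry loop: acquire an instance buffer and attempt to encode the
        // scene; on overflow the pool is reset to twice the size and we try
        // again. The buffer is released back to the pool from the command
        // buffer's completed handler, once the GPU is done reading it.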
        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }

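    /// Encodes the whole scene into one command buffer: paths are first
    /// rasterized into atlas tiles, then each primitive batch is drawn into
    /// the drawable's texture. Returns an error if any batch overflows the
    /// instance buffer, so `draw` can retry with a larger one.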
    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let Some(path_tiles) = self.rasterize_paths(
            scene.paths(),
            instance_buffer,
            &mut instance_offset,
            command_buffer,
        ) else {
            return Err(anyhow!("failed to rasterize {} paths", scene.paths().len()));
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                command_encoder.end_encoding();
                return Err(anyhow!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                ));
            }
        }

        command_encoder.end_encoding();

        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }

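    /// First pass of path rendering: allocates an atlas tile per path and
    /// rasterizes the path's vertices into an R16Float atlas texture,
    /// resolving from an MSAA texture when one is available. Returns `None`
    /// if tile allocation fails or the instance buffer overflows.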
    fn rasterize_paths(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path)?;
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > instance_buffer.size {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            let msaa_texture = self.sprite_atlas.msaa_texture(texture_id);

            if let Some(msaa_texture) = msaa_texture {
                color_attachment.set_texture(Some(&msaa_texture));
                color_attachment.set_resolve_texture(Some(&texture));
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
            } else {
                color_attachment.set_texture(Some(&texture));
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_store_action(metal::MTLStoreAction::Store);
            }
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));

            let command_encoder =
                command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let buffer_contents = unsafe {
                (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
            };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }

    fn draw_shadows(
        &self,
        shadows: &[Shadow],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &self,
        quads: &[Quad],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

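    /// Second pass of path rendering: draws the tiles produced by
    /// `rasterize_paths` as sprites, batching consecutive paths that share an
    /// atlas texture into a single instanced draw call.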
    fn draw_paths(
        &self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > instance_buffer.size {
                    return false;
                }

                let buffer_contents = unsafe {
                    (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
                };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }

    fn draw_underlines(
        &self,
        underlines: &[Underline],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

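    /// Draws video frames: each `PaintSurface` carries a biplanar YCbCr
    /// CoreVideo pixel buffer, whose luma and chroma planes are wrapped as
    /// Metal textures (without copying) via the CoreVideo texture cache and
    /// composited by the surface shader.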
    fn draw_surfaces(
        &mut self,
        surfaces: &[PaintSurface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.get_width() as i32),
                DevicePixels::from(surface.image_buffer.get_height() as i32),
            );

            assert_eq!(
                surface.image_buffer.get_pixel_format(),
                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.get_width_of_plane(0),
                    surface.image_buffer.get_height_of_plane(0),
                    0,
                )
                .unwrap();
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.get_width_of_plane(1),
                    surface.image_buffer.get_height_of_plane(1),
                    1,
                )
                .unwrap();

            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });
            command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });

            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

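/// Builds a render pipeline for the given vertex/fragment shader pair with
/// source-alpha "over" blending for RGB and additive blending for alpha.
/// Panics if the named functions are missing from the shader library.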
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
    path_sample_count: u32,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    if path_sample_count > 1 {
        descriptor.set_raster_sample_count(path_sample_count as _);
        descriptor.set_alpha_to_coverage_enabled(true);
    }
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Align to multiples of 256 to keep Metal happy: buffer offsets passed to
// `set_vertex_buffer` must meet the device's alignment requirements, and
// 256 bytes covers all macOS GPUs.
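// For example: 0 stays 0, while 1 through 256 map to 256 and 257 maps to 512.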
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}

#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Background,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}