use super::metal_atlas::MetalAtlas;
use crate::{
    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Background, Bounds, ContentMask,
    DevicePixels, MonochromeSprite, PaintSurface, Path, PathId, PathVertex, PolychromeSprite,
    PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, Underline,
};
use anyhow::{anyhow, Result};
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::{NSSize, NSUInteger},
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use foreign_types::ForeignType;
use media::core_video::CVMetalTextureCache;
use metal::{CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to metal
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA, which all devices support.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;

pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    _transparent: bool,
) -> Renderer {
    MetalRenderer::new(context)
}

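/// A pool of reusable instance buffers. `acquire`/`release` recycle buffers of
/// the current size; `reset` discards them all so `draw` can retry a frame with
/// a larger buffer after an overflow.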
pub(crate) struct InstanceBufferPool {
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}

impl Default for InstanceBufferPool {
    fn default() -> Self {
        Self {
            buffer_size: 2 * 1024 * 1024,
            buffers: Vec::new(),
        }
    }
}

pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    size: usize,
}

impl InstanceBufferPool {
    pub(crate) fn reset(&mut self, buffer_size: usize) {
        self.buffer_size = buffer_size;
        self.buffers.clear();
    }

    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
        let buffer = self.buffers.pop().unwrap_or_else(|| {
            device.new_buffer(
                self.buffer_size as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        InstanceBuffer {
            metal_buffer: buffer,
            size: self.buffer_size,
        }
    }

    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
        if buffer.size == self.buffer_size {
            self.buffers.push(buffer.metal_buffer)
        }
    }
}

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>) -> Self {
        // Prefer low-power integrated GPUs on Intel Macs. On Apple
        // Silicon, there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let mut devices = metal::Device::all();
        devices.sort_by_key(|device| (device.is_removable(), device.is_low_power()));
        let Some(device) = devices.pop() else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(false);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

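        // Pack a `Point<f32>` into a u64 whose low 32 bits are `x` and high 32
        // bits are `y`. On this little-endian target that matches the in-memory
        // layout of a `float2` as the vertex shaders read it.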
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

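        // Two triangles covering the unit square; every quad-like primitive is
        // drawn by instancing these six vertices.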
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone(), PATH_SAMPLE_COUNT));
        let core_video_texture_cache =
            unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() };

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let size = NSSize {
            width: size.width.0 as f64,
            height: size.height.0 as f64,
        };
        unsafe {
            let _: () = msg_send![
                self.layer(),
                setDrawableSize: size
            ];
        }
    }

    pub fn update_transparency(&self, _transparent: bool) {
        // todo(mac)?
    }

    pub fn destroy(&self) {
        // nothing to do
    }

    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

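        // Encode the scene into an instance buffer from the pool. If the scene
        // doesn't fit, double the pool's buffer size (up to a 256 MiB cap) and
        // try again with a fresh buffer.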
        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }

    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let Some(path_tiles) = self.rasterize_paths(
            scene.paths(),
            instance_buffer,
            &mut instance_offset,
            command_buffer,
        ) else {
            return Err(anyhow!("failed to rasterize {} paths", scene.paths().len()));
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                command_encoder.end_encoding();
                return Err(anyhow!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                ));
            }
        }

        command_encoder.end_encoding();

        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }

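    // First pass for paths: rasterize each path into a tile of an R16Float
    // atlas texture, resolving from 4x MSAA when an MSAA texture is available.
    // `draw_paths` later samples these tiles as sprites, so path anti-aliasing
    // happens off-screen.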
    fn rasterize_paths(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path)?;
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > instance_buffer.size {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            let msaa_texture = self.sprite_atlas.msaa_texture(texture_id);

            if let Some(msaa_texture) = msaa_texture {
                color_attachment.set_texture(Some(&msaa_texture));
                color_attachment.set_resolve_texture(Some(&texture));
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
            } else {
                color_attachment.set_texture(Some(&texture));
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_store_action(metal::MTLStoreAction::Store);
            }
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));

            let command_encoder =
                command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let buffer_contents = unsafe {
                (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
            };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }

    fn draw_shadows(
        &self,
        shadows: &[Shadow],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &self,
        quads: &[Quad],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

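    // Second pass for paths: draw the tiles rasterized earlier as sprites,
    // batching consecutive paths that share an atlas texture into a single
    // instanced draw call.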
    fn draw_paths(
        &self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > instance_buffer.size {
                    return false;
                }

                let buffer_contents = unsafe {
                    (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
                };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }

    fn draw_underlines(
        &self,
        underlines: &[Underline],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_surfaces(
        &mut self,
        surfaces: &[PaintSurface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

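            // The pixel buffer is biplanar YCbCr: plane 0 holds full-resolution
            // luma (Y), plane 1 interleaved subsampled chroma (CbCr). Wrapping
            // each plane through the CVMetalTextureCache avoids copying the
            // frame; the surface fragment shader presumably converts YCbCr to
            // RGB when sampling.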
            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<SurfaceBounds>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
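    // Classic straight-alpha "over" blending for color: src * alpha +
    // dst * (1 - alpha). Alpha accumulates additively (One/One), presumably to
    // keep destination alpha meaningful for transparent windows.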
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
    path_sample_count: u32,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    if path_sample_count > 1 {
        descriptor.set_raster_sample_count(path_sample_count as _);
        descriptor.set_alpha_to_coverage_enabled(true);
    }
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
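    // Additive (One/One) blending so overlapping path triangles accumulate
    // coverage in the R16Float atlas target rather than compositing over it.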
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Aligning offsets to multiples of 256 keeps Metal happy.
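// e.g. 0 -> 0, 1 -> 256, 256 -> 256, 257 -> 512.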
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}

#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Background,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}