use super::metal_atlas::MetalAtlas;
use crate::{
    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
    Hsla, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch, Quad,
    ScaledPixels, Scene, Shadow, Size, Surface, Underline,
};
use anyhow::{anyhow, Result};
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::NSUInteger,
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use foreign_types::ForeignType;
use media::core_video::CVMetalTextureCache;
use metal::{CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to Metal; the layout must match the shader's float2.
pub(crate) type PointF = crate::Point<f32>;

// With the `runtime_shaders` feature, shader source is compiled at startup;
// otherwise a precompiled metallib is embedded at build time.
#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str =
    include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));

pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;

pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    _transparent: bool,
) -> Renderer {
    MetalRenderer::new(context)
}

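/// A pool of instance buffers recycled across frames. `draw` acquires one
/// buffer per frame and releases it from the command buffer's completion
/// handler once the GPU is done; if a frame overflows the buffer, the pool is
/// reset with a doubled buffer size and the frame is retried.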
pub(crate) struct InstanceBufferPool {
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}

impl Default for InstanceBufferPool {
    fn default() -> Self {
        Self {
            buffer_size: 2 * 1024 * 1024, // initial size: 2 MiB
            buffers: Vec::new(),
        }
    }
}

pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    size: usize,
}

impl InstanceBufferPool {
    /// Discard all pooled buffers and start allocating at `buffer_size`.
    pub(crate) fn reset(&mut self, buffer_size: usize) {
        self.buffer_size = buffer_size;
        self.buffers.clear();
    }

    /// Reuse a pooled buffer if one is available, otherwise allocate a new one.
    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
        let buffer = self.buffers.pop().unwrap_or_else(|| {
            device.new_buffer(
                self.buffer_size as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        InstanceBuffer {
            metal_buffer: buffer,
            size: self.buffer_size,
        }
    }

    /// Return a buffer to the pool, dropping it if the pool has since been
    /// reset to a different buffer size.
    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
        if buffer.size == self.buffer_size {
            self.buffers.push(buffer.metal_buffer)
        }
    }
}
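
// A minimal test sketch (not part of the original file) exercising the pool's
// acquire/release round-trip. It assumes a Metal-capable device is available
// at test time and bails out otherwise, so it is safe on machines without a GPU.
#[cfg(test)]
mod instance_buffer_pool_tests {
    use super::*;

    #[test]
    fn recycles_buffers_of_matching_size() {
        let Some(device) = metal::Device::system_default() else {
            return; // no GPU in this environment
        };
        let mut pool = InstanceBufferPool::default();
        let buffer = pool.acquire(&device);
        let first_allocation = buffer.metal_buffer.as_ptr();
        pool.release(buffer);

        // While the pool's size is unchanged, the same allocation is reused.
        let buffer = pool.acquire(&device);
        assert_eq!(buffer.metal_buffer.as_ptr(), first_allocation);

        // After a reset, buffers of the old size are dropped on release.
        pool.reset(4 * 1024 * 1024);
        pool.release(buffer);
        assert!(pool.buffers.is_empty());
    }
}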

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(false);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        // Pack a point into the bit layout of a Metal float2: x in the low
        // 32 bits, y in the high 32 bits.
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        // Two triangles covering the unit square; the vertex shaders scale
        // and position them per instance.
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() };

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<f64>) {
        unsafe {
            let _: () = msg_send![self.layer(), setDrawableSize: size];
        }
    }

    pub fn update_transparency(&mut self, _transparent: bool) {
        // todo(mac)?
    }

    pub fn destroy(&mut self) {
        // nothing to do
    }

    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

        // Retry the frame with a larger instance buffer whenever the scene
        // doesn't fit, doubling the size up to a hard cap.
        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    // Return the instance buffer to the pool only once the GPU
                    // has finished executing this command buffer.
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with a larger instance buffer",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }

    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        // Paths are rasterized into atlas textures in their own render passes
        // before the main pass samples from them.
        let Some(path_tiles) = self.rasterize_paths(
            scene.paths(),
            instance_buffer,
            &mut instance_offset,
            command_buffer,
        ) else {
            return Err(anyhow!("failed to rasterize {} paths", scene.paths().len()));
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                command_encoder.end_encoding();
                return Err(anyhow!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                ));
            }
        }

        command_encoder.end_encoding();

        // Tell Metal which range of the managed buffer the CPU wrote.
        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }

    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path)?;
            // Translate the path's vertices into the coordinate space of its
            // atlas tile.
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > instance_buffer.size {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder =
                command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let buffer_contents = unsafe {
                (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
            };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }

    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        // Check capacity before deriving a pointer into the buffer.
        let shadow_bytes_len = mem::size_of_val(shadows);
        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &mut self,
        quads: &[Quad],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        // Check capacity before deriving a pointer into the buffer.
        let quad_bytes_len = mem::size_of_val(quads);
        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        // Group consecutive paths that share an atlas texture into a single
        // instanced draw call.
        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > instance_buffer.size {
                    return false;
                }

                let buffer_contents = unsafe {
                    (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
                };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }

    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        // Check capacity before deriving a pointer into the buffer.
        let underline_bytes_len = mem::size_of_val(underlines);
        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        // Check capacity before deriving a pointer into the buffer.
        let sprite_bytes_len = mem::size_of_val(sprites);
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        // Check capacity before deriving a pointer into the buffer.
        let sprite_bytes_len = mem::size_of_val(sprites);
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            // Wrap the biplanar YCbCr image buffer's planes in Metal textures:
            // plane 0 holds luma (Y), plane 1 holds interleaved chroma (CbCr).
            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

            align_offset(instance_offset);
            // Reserve room for the `SurfaceBounds` written below.
            let next_offset = *instance_offset + mem::size_of::<SurfaceBounds>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}
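
// A minimal sketch (hypothetical; not used by the methods above) of the upload
// pattern each `draw_*` method inlines: align the offset to 256 bytes,
// bounds-check against the buffer's capacity, then memcpy the instance data
// into place. It returns the aligned byte offset at which the instances were
// written, or `None` when the caller should grow the pool and retry the frame.
#[allow(dead_code)]
fn upload_instances<T>(
    instance_buffer: &mut InstanceBuffer,
    instance_offset: &mut usize,
    instances: &[T],
) -> Option<u64> {
    align_offset(instance_offset);
    let bytes_len = mem::size_of_val(instances);
    let next_offset = *instance_offset + bytes_len;
    if next_offset > instance_buffer.size {
        return None;
    }
    unsafe {
        let dst = (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset);
        ptr::copy_nonoverlapping(instances.as_ptr() as *const u8, dst, bytes_len);
    }
    let aligned_offset = *instance_offset as u64;
    *instance_offset = next_offset;
    Some(aligned_offset)
}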

fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    // Straight-alpha "over" compositing for color; additive for alpha.
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

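// Unlike `build_pipeline_state` above, the path-rasterization pipeline blends
// with all-`One` factors, so overlapping path fragments accumulate into the
// R16Float atlas texture instead of compositing over it.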
fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Aligning offsets to multiples of 256 bytes keeps Metal happy.
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}

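// A minimal test sketch (not part of the original file) pinning down the
// rounding behavior of `align_offset`.
#[cfg(test)]
mod align_offset_tests {
    use super::align_offset;

    #[test]
    fn rounds_up_to_the_next_multiple_of_256() {
        let cases = [(0, 0), (1, 256), (255, 256), (256, 256), (257, 512)];
        for (input, expected) in cases {
            let mut offset = input;
            align_offset(&mut offset);
            assert_eq!(offset, expected, "align_offset({input})");
        }
    }
}
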
// These indices identify the argument-table slots used by the draw methods
// above; they must stay in sync with the corresponding buffer and texture
// bindings in the Metal shaders.
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

/// Instance data for the `path_sprites` pipeline; the `#[repr(C)]` layout is
/// shared with the Metal shaders.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    pub tile: AtlasTile,
}

/// Per-surface instance data written by `draw_surfaces`; the `#[repr(C)]`
/// layout is shared with the Metal shaders.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}