use super::metal_atlas::MetalAtlas;
use crate::{
    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
    Hsla, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch, Quad,
    ScaledPixels, Scene, Shadow, Size, Surface, Underline,
};
use anyhow::{anyhow, Result};
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::{NSSize, NSUInteger},
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use foreign_types::ForeignType;
use media::core_video::CVMetalTextureCache;
use metal::{CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to Metal.
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;

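/// Creates the macOS renderer. The raw window/view pointers, bounds, and
/// transparency flag are unused on this backend (callers attach
/// [`MetalRenderer::layer`] to their view separately); the signature and the
/// `unsafe` marker presumably exist for parity with other platform backends
/// that do dereference these pointers.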
pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    _transparent: bool,
) -> Renderer {
    MetalRenderer::new(context)
}

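/// A pool of instance buffers shared between frames. `draw` acquires a buffer
/// per frame, releases it from the command buffer's completion handler, and
/// doubles the pool's buffer size (via [`InstanceBufferPool::reset`]) whenever
/// a scene does not fit.
///
/// A minimal usage sketch (assuming a `device: metal::Device` in scope),
/// mirroring what `draw` does below:
/// ```ignore
/// let pool = Arc::new(Mutex::new(InstanceBufferPool::default()));
/// let buffer = pool.lock().acquire(&device);
/// // ... encode a frame using `buffer.metal_buffer` ...
/// pool.lock().release(buffer);
/// ```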
pub(crate) struct InstanceBufferPool {
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}

impl Default for InstanceBufferPool {
    fn default() -> Self {
        Self {
            // 2 MiB initial capacity; `draw` grows this when a scene overflows it.
            buffer_size: 2 * 1024 * 1024,
            buffers: Vec::new(),
        }
    }
}

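/// A buffer checked out of the pool, tagged with the pool's buffer size at the
/// time of acquisition so stale buffers can be rejected on release.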
pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    size: usize,
}

impl InstanceBufferPool {
    /// Drops all pooled buffers and adopts a new per-buffer size.
    pub(crate) fn reset(&mut self, buffer_size: usize) {
        self.buffer_size = buffer_size;
        self.buffers.clear();
    }

    /// Reuses a pooled buffer when one is available; otherwise allocates a new
    /// managed-storage buffer of the current pool size.
    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
        let buffer = self.buffers.pop().unwrap_or_else(|| {
            device.new_buffer(
                self.buffer_size as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        InstanceBuffer {
            metal_buffer: buffer,
            size: self.buffer_size,
        }
    }

    /// Returns a buffer to the pool, unless the pool has been reset to a
    /// different buffer size since the buffer was acquired.
    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
        if buffer.size == self.buffer_size {
            self.buffers.push(buffer.metal_buffer)
        }
    }
}

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(false);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

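        // Packs a `PointF` into a u64 whose byte layout matches a Metal
        // `float2` on little-endian targets (x in the low 32 bits, y in the
        // high 32 bits), so the array below can be uploaded directly as
        // vertex data.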
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

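        // Two triangles covering the unit square; the instanced pipelines
        // scale and position these per primitive in their vertex shaders.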
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

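        // One pipeline per primitive kind. Path rasterization renders into
        // R16Float atlas textures; every other pipeline targets the
        // BGRA8Unorm drawable directly.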
        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() };

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let size = NSSize {
            width: size.width.0 as f64,
            height: size.height.0 as f64,
        };
        unsafe {
            let _: () = msg_send![
                self.layer(),
                setDrawableSize: size
            ];
        }
    }

    pub fn update_transparency(&mut self, _transparent: bool) {
        // todo(mac)?
    }

    pub fn destroy(&mut self) {
        // nothing to do
    }

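    /// Renders a scene into the layer's next drawable. If the scene's
    /// instances overflow the current instance buffer, the pool is reset to
    /// twice the size and the frame is retried, giving up once the buffer
    /// would exceed 256 MiB. The acquired buffer is returned to the pool from
    /// the command buffer's completion handler.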
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }

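    /// Encodes a whole scene into a single command buffer: first the path
    /// rasterization passes into atlas textures, then one render pass over
    /// the drawable that replays the scene's batches in order. Returns an
    /// error if any batch fails to fit into the instance buffer.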
    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let Some(path_tiles) = self.rasterize_paths(
            scene.paths(),
            instance_buffer,
            &mut instance_offset,
            command_buffer,
        ) else {
            return Err(anyhow!("failed to rasterize {} paths", scene.paths().len()));
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                command_encoder.end_encoding();
                return Err(anyhow!("scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                ));
            }
        }

        command_encoder.end_encoding();

        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }

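    /// Rasterizes each path into a tile of the path atlas: allocates a tile
    /// per path, copies the path's vertices (translated into tile space) into
    /// the instance buffer, and encodes one render pass per atlas texture.
    /// Returns `None` when the instance buffer is exhausted, which makes the
    /// caller retry the frame with a larger buffer.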
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path)?;
            // Translate vertices from window coordinates into the tile's
            // coordinates within its atlas texture.
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > instance_buffer.size {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let buffer_contents = unsafe {
                (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
            };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }

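    // The draw_* methods below share a pattern: 256-byte-align the instance
    // offset, bind the unit-square vertices plus the instance data at that
    // offset, copy the instances into the buffer, and draw 6 vertices per
    // instance. Each returns false when the instances don't fit, so the
    // frame can be retried with a larger buffer.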
    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        // Only form the destination pointer once the bounds check has passed;
        // offsetting a raw pointer past the end of its allocation would be
        // undefined behavior.
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &mut self,
        quads: &[Quad],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        // Bounds-check before forming the destination pointer (see draw_shadows).
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

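    /// Composites previously rasterized path tiles into the drawable. Sprites
    /// are accumulated while consecutive paths share an atlas texture and
    /// flushed as one instanced draw whenever the texture changes (or the
    /// paths run out).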
    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > instance_buffer.size {
                    return false;
                }

                let buffer_contents = unsafe {
                    (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
                };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }

    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        // Bounds-check before forming the destination pointer (see draw_shadows).
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let sprite_bytes_len = mem::size_of_val(sprites);
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        // Bounds-check before forming the destination pointer (see draw_shadows).
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        // Bounds-check before forming the destination pointer (see draw_shadows).
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

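    /// Draws video surfaces. Each surface is a biplanar 4:2:0 YCbCr
    /// CVPixelBuffer; the luminance and chrominance planes are wrapped as
    /// R8Unorm/RG8Unorm Metal textures through the CoreVideo texture cache
    /// and presumably converted to RGB in the fragment shader.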
    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

            align_offset(instance_offset);
            // Note: this reserves size_of::<Surface>() bytes even though only
            // a SurfaceBounds is written below; that is more than strictly
            // necessary, but not incorrect.
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

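/// Builds a pipeline that blends RGB with SourceAlpha/OneMinusSourceAlpha
/// (classic "over" compositing of non-premultiplied color) while
/// accumulating alpha additively with One/One.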
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

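/// Like `build_pipeline_state`, but with fully additive One/One blending in
/// both RGB and alpha, presumably so overlapping path triangles accumulate
/// coverage in the R16Float atlas target rather than compositing over each
/// other.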
fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Instance buffer offsets handed to Metal must be 256-byte aligned, so round
// up to the next multiple of 256 (e.g. 0 -> 0, 1 -> 256, 257 -> 512).
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}

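// The enums below mirror the buffer and texture argument indices that the
// Metal shaders expect; they must stay in sync with the shader source.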
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}