1use crate::{
2 point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
3 Hsla, MetalAtlas, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch,
4 Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline,
5};
6use block::ConcreteBlock;
7use cocoa::{
8 base::{NO, YES},
9 foundation::NSUInteger,
10 quartzcore::AutoresizingMask,
11};
12use collections::HashMap;
13use core_foundation::base::TCFType;
14use foreign_types::ForeignType;
15use media::core_video::CVMetalTextureCache;
16use metal::{CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
17use objc::{self, msg_send, sel, sel_impl};
18use parking_lot::Mutex;
19use smallvec::SmallVec;
20use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};
21
// Precompiled Metal shader library produced by the build script into OUT_DIR;
// used when shaders are compiled ahead of time.
#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
// Stitched shader source text, compiled at runtime in `MetalRenderer::new`
// when the "runtime_shaders" feature is enabled (useful for shader iteration).
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &'static str =
    include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Capacity of each pooled per-frame instance buffer; draw_* methods return
// false when a frame's instance data would exceed this size.
const INSTANCE_BUFFER_SIZE: usize = 2 * 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value (maybe even we could adjust dynamically...)
28
/// GPU renderer for the macOS backend: owns the Metal device, the layer the
/// window displays, and one render pipeline per primitive kind, and encodes a
/// `Scene` into GPU commands each frame via [`MetalRenderer::draw`].
pub(crate) struct MetalRenderer {
    device: metal::Device,
    // CAMetalLayer that provides the drawables presented each frame.
    layer: metal::MetalLayer,
    command_queue: CommandQueue,
    // One pipeline state per primitive type; configured in
    // `build_pipeline_state` / `build_path_rasterization_pipeline_state`.
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    // Six packed float2 vertices forming a unit quad (two triangles); instanced
    // draws scale these into each primitive's bounds.
    unit_vertices: metal::Buffer,
    // Instance buffers recycled across frames; a buffer is popped in `draw` and
    // pushed back by the command buffer's completion handler.
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>,
    sprite_atlas: Arc<MetalAtlas>,
    // Wraps CVPixelBuffer planes as Metal textures in `draw_surfaces`.
    core_video_texture_cache: CVMetalTextureCache,
}
47
48impl MetalRenderer {
    /// Creates a renderer on the system-default Metal device, configuring the
    /// layer, unit-quad vertex buffer, and all render pipeline states.
    ///
    /// `instance_buffer_pool` is shared so recycled instance buffers can
    /// outlive any single renderer/frame.
    ///
    /// Exits the process if no compatible graphics device is available.
    pub fn new(instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        // Synchronize presentation with Core Animation transactions; see the
        // wait_until_scheduled() call in `draw`.
        layer.set_presents_with_transaction(true);
        layer.set_opaque(true);
        // Properties without safe wrappers in the metal crate are set via
        // raw Objective-C messages.
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        // Runtime compilation path for shader development; otherwise load the
        // metallib precompiled by the build script.
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        // Packs a point into the bit layout of a Metal float2 (x in the low
        // 32 bits, y in the high 32 bits).
        fn to_float2_bits(point: crate::PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        // Two triangles covering the unit square, used by all instanced draws.
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        // Path rasterization renders coverage into a single-channel float
        // texture; all other pipelines target the layer's BGRA format.
        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        // SAFETY relies on `device` being a valid Metal device for the
        // lifetime of the cache.
        let core_video_texture_cache =
            unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() };

        Self {
            device,
            layer,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }
189
    /// The Metal layer this renderer draws into; callers attach it to a window.
    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }
193
    /// The shared sprite atlas used for glyphs/sprites and path tiles.
    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }
197
    /// Renders one frame: rasterizes paths into atlas tiles, encodes all scene
    /// batches into a single render pass, and presents the drawable.
    ///
    /// Drops the frame (with a logged error) if no drawable can be obtained or
    /// path rasterization overflows the instance buffer.
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        // Reuse a pooled instance buffer when available; otherwise allocate.
        // The buffer is pushed back into the pool by the completion handler
        // installed below.
        let mut instance_buffer = self.instance_buffer_pool.lock().pop().unwrap_or_else(|| {
            self.device.new_buffer(
                INSTANCE_BUFFER_SIZE as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        // Paths are rasterized into atlas textures first so the main pass can
        // sample them as sprites.
        // NOTE(review): on this early return the popped `instance_buffer` is
        // dropped rather than returned to the pool — confirm that shrinking the
        // pool on overflow is intentional (it is re-grown next frame).
        let Some(path_tiles) = self.rasterize_paths(
            scene.paths(),
            &mut instance_buffer,
            &mut instance_offset,
            command_buffer,
        ) else {
            log::error!("failed to rasterize {} paths", scene.paths().len());
            return;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        // Opaque layers clear to opaque black; transparent ones to fully clear.
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });
        // Encode batches in scene order; each draw_* method appends its
        // instance data to the shared buffer and returns false on overflow.
        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            // Stop encoding on overflow; whatever was encoded so far is still
            // committed below.
            if !ok {
                log::error!("scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
                break;
            }
        }

        command_encoder.end_encoding();

        // Managed storage: tell Metal which CPU-written byte range to sync.
        instance_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

        // Recycle the instance buffer once the GPU has finished with it.
        let instance_buffer_pool = self.instance_buffer_pool.clone();
        let instance_buffer = Cell::new(Some(instance_buffer));
        let block = ConcreteBlock::new(move |_| {
            if let Some(instance_buffer) = instance_buffer.take() {
                instance_buffer_pool.lock().push(instance_buffer);
            }
        });
        let block = block.copy();
        command_buffer.add_completed_handler(&block);
        command_buffer.commit();
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        // presumably paired with presents_with_transaction(true) set in `new`
        // — confirm before changing the scheduling/presentation order.
        command_buffer.wait_until_scheduled();
        drawable.present();
    }
353
    /// Rasterizes `paths` into freshly allocated path-atlas tiles, encoding one
    /// render pass per destination atlas texture onto `command_buffer`.
    ///
    /// Vertex data is appended to `instance_buffer` at `instance_offset`
    /// (advanced past the written bytes). Returns the tile for each path id, or
    /// `None` if the instance buffer cannot hold all vertices.
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        let mut tiles = HashMap::default();
        // Group vertices by destination texture so each atlas texture is
        // rendered in a single pass.
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                // Translate vertices from window coordinates into the tile's
                // position inside its atlas texture.
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            // Metal requires 256-byte-aligned buffer offsets.
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > INSTANCE_BUFFER_SIZE {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            // SAFETY: next_offset (= instance_offset + vertices_bytes_len) was
            // bounds-checked against INSTANCE_BUFFER_SIZE above, so this write
            // stays inside the buffer's contents.
            let buffer_contents =
                unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }
440
    /// Encodes an instanced draw of `shadows` (one unit quad per shadow).
    /// Appends the shadow instances to `instance_buffer` at `instance_offset`;
    /// returns `false` if they don't fit, `true` otherwise.
    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        // Metal requires 256-byte-aligned buffer offsets.
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        // Instance data is bound to both stages at the current offset.
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        // Bail out before writing if this batch would overflow the buffer.
        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // SAFETY: the range [instance_offset, next_offset) was just checked to
        // lie within INSTANCE_BUFFER_SIZE.
        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        // 6 vertices (unit quad) per instance.
        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }
503
    /// Encodes an instanced draw of `quads` (one unit quad per instance).
    /// Appends the quad instances to `instance_buffer` at `instance_offset`;
    /// returns `false` if they don't fit, `true` otherwise.
    fn draw_quads(
        &mut self,
        quads: &[Quad],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        // Metal requires 256-byte-aligned buffer offsets.
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        // Instance data is bound to both stages at the current offset.
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        // Bail out before writing if this batch would overflow the buffer.
        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // SAFETY: the range [instance_offset, next_offset) was just checked to
        // lie within INSTANCE_BUFFER_SIZE.
        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }
562
    /// Draws previously rasterized paths as sprites sampled from their atlas
    /// tiles (see `rasterize_paths`), batching consecutive paths that share an
    /// atlas texture into a single instanced draw.
    /// Returns `false` if a sprite batch would overflow the instance buffer.
    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        // Every path must have a tile; rasterize_paths inserted one per path id.
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        // Accumulate sprites while the upcoming path uses the same atlas
        // texture; flush the accumulated batch whenever the texture changes or
        // the paths run out.
        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                // Flush the current batch for prev_texture_id.
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > INSTANCE_BUFFER_SIZE {
                    return false;
                }

                // SAFETY: the destination range was bounds-checked just above.
                let buffer_contents =
                    unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }
671
    /// Encodes an instanced draw of `underlines` (one unit quad per instance).
    /// Appends the instances to `instance_buffer` at `instance_offset`;
    /// returns `false` if they don't fit, `true` otherwise.
    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        // Metal requires 256-byte-aligned buffer offsets.
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        // Instance data is bound to both stages at the current offset.
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        // Bail out before writing if this batch would overflow the buffer.
        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // SAFETY: the range [instance_offset, next_offset) was just checked to
        // lie within INSTANCE_BUFFER_SIZE.
        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }
734
    /// Encodes an instanced draw of monochrome sprites (e.g. glyph coverage)
    /// sampled from the atlas texture identified by `texture_id`.
    /// Returns `false` if the instances don't fit in the instance buffer.
    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        // Metal requires 256-byte-aligned buffer offsets.
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        // Bail out before writing if this batch would overflow the buffer.
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // SAFETY: the range [instance_offset, next_offset) was just checked to
        // lie within INSTANCE_BUFFER_SIZE.
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }
808
    /// Encodes an instanced draw of polychrome (full-color) sprites sampled
    /// from the atlas texture identified by `texture_id`.
    /// Returns `false` if the instances don't fit in the instance buffer.
    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        // Metal requires 256-byte-aligned buffer offsets.
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        // Bail out before writing if this batch would overflow the buffer.
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // SAFETY: the range [instance_offset, next_offset) was just checked to
        // lie within INSTANCE_BUFFER_SIZE.
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }
882
    /// Draws video/image surfaces. Each surface's CVPixelBuffer planes are
    /// wrapped as Metal textures (luma + chroma) via the core-video texture
    /// cache and drawn one quad at a time.
    /// Returns `false` if a surface's bounds record doesn't fit in the
    /// instance buffer.
    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            // Only bi-planar 4:2:0 YCbCr buffers are supported; the plane
            // formats below (R8 luma, RG8 chroma) depend on this layout.
            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            // Plane 0: full-resolution luma (Y) as a single-channel texture.
            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            // Plane 1: interleaved chroma (CbCr) as a two-channel texture.
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

            // Metal requires 256-byte-aligned buffer offsets.
            align_offset(instance_offset);
            // NOTE(review): space is reserved using size_of::<Surface>() but a
            // SurfaceBounds is what gets written below — confirm this
            // over-reservation is intentional (it is safe as long as
            // Surface is at least as large as SurfaceBounds).
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > INSTANCE_BUFFER_SIZE {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            // SAFETY: the destination range was bounds-checked above.
            unsafe {
                let buffer_contents = (instance_buffer.contents() as *mut u8).add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
981}
982
983fn build_pipeline_state(
984 device: &metal::DeviceRef,
985 library: &metal::LibraryRef,
986 label: &str,
987 vertex_fn_name: &str,
988 fragment_fn_name: &str,
989 pixel_format: metal::MTLPixelFormat,
990) -> metal::RenderPipelineState {
991 let vertex_fn = library
992 .get_function(vertex_fn_name, None)
993 .expect("error locating vertex function");
994 let fragment_fn = library
995 .get_function(fragment_fn_name, None)
996 .expect("error locating fragment function");
997
998 let descriptor = metal::RenderPipelineDescriptor::new();
999 descriptor.set_label(label);
1000 descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1001 descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1002 let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1003 color_attachment.set_pixel_format(pixel_format);
1004 color_attachment.set_blending_enabled(true);
1005 color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1006 color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1007 color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
1008 color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1009 color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1010 color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1011
1012 device
1013 .new_render_pipeline_state(&descriptor)
1014 .expect("could not create render pipeline state")
1015}
1016
1017fn build_path_rasterization_pipeline_state(
1018 device: &metal::DeviceRef,
1019 library: &metal::LibraryRef,
1020 label: &str,
1021 vertex_fn_name: &str,
1022 fragment_fn_name: &str,
1023 pixel_format: metal::MTLPixelFormat,
1024) -> metal::RenderPipelineState {
1025 let vertex_fn = library
1026 .get_function(vertex_fn_name, None)
1027 .expect("error locating vertex function");
1028 let fragment_fn = library
1029 .get_function(fragment_fn_name, None)
1030 .expect("error locating fragment function");
1031
1032 let descriptor = metal::RenderPipelineDescriptor::new();
1033 descriptor.set_label(label);
1034 descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1035 descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1036 let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1037 color_attachment.set_pixel_format(pixel_format);
1038 color_attachment.set_blending_enabled(true);
1039 color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1040 color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1041 color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
1042 color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1043 color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
1044 color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1045
1046 device
1047 .new_render_pipeline_state(&descriptor)
1048 .expect("could not create render pipeline state")
1049}
1050
/// Rounds `offset` up to the next multiple of 256 bytes — Metal buffer-offset
/// bindings want 256-byte alignment.
fn align_offset(offset: &mut usize) {
    // Bitmask form of ((offset + 255) / 256) * 256; 256 is a power of two.
    *offset = (*offset + 255) & !255;
}
1055
/// Buffer indices for the shadow pass, passed to set_vertex_buffer /
/// set_fragment_buffer / set_vertex_bytes in `draw_shadows`.
/// Values must stay in sync with the Metal shader argument indices — confirm
/// against the shader source before changing.
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}
1062
/// Buffer indices for the quad pass (see `draw_quads`). Values must stay in
/// sync with the Metal shader argument indices — confirm against the shader
/// source before changing.
#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}
1069
/// Buffer indices for the underline pass (see `draw_underlines`). Values must
/// stay in sync with the Metal shader argument indices — confirm against the
/// shader source before changing.
#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}
1076
/// Buffer/texture indices shared by the mono/polychrome sprite and path-sprite
/// passes (see `draw_monochrome_sprites`, `draw_polychrome_sprites`,
/// `draw_paths`). Values must stay in sync with the Metal shader argument
/// indices — confirm against the shader source before changing.
#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}
1085
/// Buffer/texture indices for the surface (video) pass (see `draw_surfaces`).
/// YTexture/CbCrTexture are the luma and chroma planes. Values must stay in
/// sync with the Metal shader argument indices — confirm against the shader
/// source before changing.
#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}
1095
/// Buffer indices for the path rasterization pass (see `rasterize_paths`).
/// Values must stay in sync with the Metal shader argument indices — confirm
/// against the shader source before changing.
#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}
1101
/// Per-instance data for the path-sprite pass: where to draw the sprite on
/// screen, its fill color, and the atlas tile to sample coverage from.
/// `#[repr(C)]` because instances are copied byte-wise into the GPU instance
/// buffer in `draw_paths`.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    pub tile: AtlasTile,
}
1109
/// Per-instance data for the surface pass: on-screen bounds plus the clipping
/// mask. `#[repr(C)]` because it is written byte-wise into the GPU instance
/// buffer in `draw_surfaces`.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}