use super::metal_atlas::MetalAtlas;
use crate::{
    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
    Hsla, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch, Quad,
    ScaledPixels, Scene, Shadow, Size, Surface, Underline,
};
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::NSUInteger,
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use foreign_types::ForeignType;
use media::core_video::CVMetalTextureCache;
use metal::{CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to Metal shaders.
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// An arbitrary choice; there is probably a more optimal value, and we could even
// size the buffer dynamically.
const INSTANCE_BUFFER_SIZE: usize = 2 * 1024 * 1024;

pub type Context = Arc<Mutex<Vec<metal::Buffer>>>;
pub type Renderer = MetalRenderer;

pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
) -> Renderer {
    MetalRenderer::new(context)
}

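/// A GPU renderer that draws a [`Scene`] into a `CAMetalLayer` using Metal.
/// Per-primitive instance data is packed into pooled, CPU-visible buffers that
/// are recycled once the GPU finishes each frame.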
pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(true);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

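        // Pack a point's x and y `f32` bit patterns into a single `u64`, matching
        // the memory layout of a Metal `float2` (x in the low 32 bits, y in the
        // high 32 bits).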
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

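        // Two triangles covering the unit square; every instanced primitive is
        // drawn by scaling and translating these six vertices in the shader.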
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() };

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<f64>) {
        unsafe {
            let _: () = msg_send![self.layer(), setDrawableSize: size];
        }
    }

    pub fn destroy(&mut self) {
        // nothing to do
    }

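    /// Draws the given scene into the layer's next drawable. Paths are rasterized
    /// into atlas textures first; the remaining primitive batches are then encoded
    /// in scene order within a single render pass.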
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        let mut instance_buffer = self.instance_buffer_pool.lock().pop().unwrap_or_else(|| {
            self.device.new_buffer(
                INSTANCE_BUFFER_SIZE as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        // Rasterize all paths into atlas tiles up front so the path sprites drawn
        // below can sample them.
        let Some(path_tiles) = self.rasterize_paths(
            scene.paths(),
            &mut instance_buffer,
            &mut instance_offset,
            command_buffer,
        ) else {
            log::error!("failed to rasterize {} paths", scene.paths().len());
            return;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });
        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                log::error!("scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
                break;
            }
        }

        command_encoder.end_encoding();

        instance_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

        // Return the instance buffer to the pool once the GPU has finished with
        // this frame.
        let instance_buffer_pool = self.instance_buffer_pool.clone();
        let instance_buffer = Cell::new(Some(instance_buffer));
        let block = ConcreteBlock::new(move |_| {
            if let Some(instance_buffer) = instance_buffer.take() {
                instance_buffer_pool.lock().push(instance_buffer);
            }
        });
        let block = block.copy();
        command_buffer.add_completed_handler(&block);

        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        if self.presents_with_transaction {
            command_buffer.commit();
            command_buffer.wait_until_scheduled();
            drawable.present();
        } else {
            command_buffer.present_drawable(drawable);
            command_buffer.commit();
        }
    }

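    /// Rasterizes every path into a tile of a path atlas texture, returning the
    /// tile assigned to each `PathId`, or `None` if the vertices would overflow
    /// the instance buffer.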
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            // Allocate an atlas tile for the path and translate its vertices into
            // tile-local coordinates.
            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert_with(Vec::new)
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        // One render pass per atlas texture, drawing all of its paths' vertices
        // in a single call.
        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > INSTANCE_BUFFER_SIZE {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let buffer_contents =
                unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }

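    /// Draws a batch of shadows as instanced unit quads. Returns `false` if the
    /// instance buffer is full.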
    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

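    /// Draws a batch of quads as instanced unit quads. Returns `false` if the
    /// instance buffer is full.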
    fn draw_quads(
        &mut self,
        quads: &[Quad],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

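    /// Draws previously rasterized paths by blitting their atlas tiles to the
    /// screen, batching consecutive paths that share an atlas texture into a
    /// single instanced draw.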
    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        // Accumulate sprites for consecutive paths that share an atlas texture,
        // then flush them with one instanced draw whenever the texture changes.
        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > INSTANCE_BUFFER_SIZE {
                    return false;
                }

                let buffer_contents =
                    unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }

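    /// Draws a batch of underlines as instanced unit quads. Returns `false` if the
    /// instance buffer is full.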
    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

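    /// Draws monochrome sprites (e.g. glyphs) sampled from the given atlas texture.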
    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

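    /// Draws polychrome sprites (e.g. images and emoji) sampled from the given
    /// atlas texture.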
    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

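    /// Draws video surfaces by wrapping each surface's CoreVideo pixel-buffer
    /// planes (Y and CbCr) in Metal textures, issuing one draw call per surface.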
    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            // Wrap the luminance (Y) and chrominance (CbCr) planes of the pixel
            // buffer in Metal textures.
            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > INSTANCE_BUFFER_SIZE {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            unsafe {
                let buffer_contents = (instance_buffer.contents() as *mut u8)
                    .add(*instance_offset) as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

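/// Builds a render pipeline state for on-screen drawing, configured for
/// source-over alpha blending.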
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

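/// Builds the render pipeline state used to rasterize paths into an atlas
/// texture. All blend factors are `One`, so overlapping geometry accumulates
/// additively in the `R16Float` target.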
fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Aligning to multiples of 256 makes Metal happy (buffer offsets on macOS must be
// 256-byte aligned). For example, an offset of 1 rounds up to 256, and 257 rounds
// up to 512.
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}

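// The following indices are passed to `set_vertex_buffer`/`set_fragment_texture`
// and must stay in sync with the corresponding `[[buffer(n)]]`/`[[texture(n)]]`
// bindings in the Metal shader source.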
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}
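
// An illustrative sanity check for `align_offset`, added here as an example;
// values are chosen to cover the boundary cases of 256-byte rounding.
#[cfg(test)]
mod tests {
    use super::align_offset;

    #[test]
    fn align_offset_rounds_up_to_multiples_of_256() {
        let mut offset = 0;
        align_offset(&mut offset);
        assert_eq!(offset, 0); // zero is already aligned

        let mut offset = 1;
        align_offset(&mut offset);
        assert_eq!(offset, 256); // rounds up to the next multiple

        let mut offset = 256;
        align_offset(&mut offset);
        assert_eq!(offset, 256); // exact multiples are unchanged

        let mut offset = 257;
        align_offset(&mut offset);
        assert_eq!(offset, 512);
    }
}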