use crate::{
    platform::mac::ns_string, point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds,
    ContentMask, DevicePixels, Hsla, MetalAtlas, MonochromeSprite, Path, PathId, PathVertex,
    PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline,
};
use block::ConcreteBlock;
use cocoa::{
    base::{nil, NO, YES},
    foundation::{NSDictionary, NSUInteger},
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use foreign_types::ForeignType;
use media::core_video::CVMetalTextureCache;
use metal::{CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to metal
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str =
    include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
const INSTANCE_BUFFER_SIZE: usize = 2 * 1024 * 1024; // This size is an arbitrary choice; there may be a more optimal value, and we could perhaps even adjust it dynamically.
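// When a frame's instances would overflow this size, the draw_* methods below
// return false and draw() truncates the scene, logging a "scene too large" error.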

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(true);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }
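
        // For example, to_float2_bits(point(1., 0.)) returns 0x0000_0000_3F80_0000:
        // the y bits land in the high 32 bits and the x bits in the low 32 bits,
        // which is a little-endian float2 { x, y } from the shader's point of view.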

        // Two triangles that cover the unit square; the instance shaders scale
        // and translate these into each primitive's bounds.
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() };

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    /// Enables or disables the Metal HUD for debugging purposes. Note that this only works
    /// when the app is bundled and has the `MetalHudEnabled` key set to true in its Info.plist.
    pub fn set_hud_enabled(&mut self, enabled: bool) {
        unsafe {
            if enabled {
                let hud_properties = NSDictionary::dictionaryWithObject_forKey_(
                    nil,
                    ns_string("default"),
                    ns_string("mode"),
                );
                let _: () = msg_send![&*self.layer, setDeveloperHUDProperties: hud_properties];
            } else {
                let _: () =
                    msg_send![&*self.layer, setDeveloperHUDProperties: NSDictionary::dictionary(nil)];
            }
        }
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }
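
    // Frame flow, as implemented below: acquire the next drawable, rasterize any
    // paths into atlas tiles, encode one render pass over the scene's batches,
    // then recycle the instance buffer from the command buffer's completion
    // handler once the GPU is done with it.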
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        let mut instance_buffer = self.instance_buffer_pool.lock().pop().unwrap_or_else(|| {
            self.device.new_buffer(
                INSTANCE_BUFFER_SIZE as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let Some(path_tiles) = self.rasterize_paths(
            scene.paths(),
            &mut instance_buffer,
            &mut instance_offset,
            command_buffer,
        ) else {
            log::error!("failed to rasterize {} paths", scene.paths().len());
            return;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });
        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                log::error!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
                break;
            }
        }

        command_encoder.end_encoding();

        instance_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
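
        // A block can be called through a shared reference, so the closure below
        // must be a Fn; the Cell<Option<_>> lets it move the buffer back into the
        // pool exactly once when the GPU signals completion.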
        let instance_buffer_pool = self.instance_buffer_pool.clone();
        let instance_buffer = Cell::new(Some(instance_buffer));
        let block = ConcreteBlock::new(move |_| {
            if let Some(instance_buffer) = instance_buffer.take() {
                instance_buffer_pool.lock().push(instance_buffer);
            }
        });
        let block = block.copy();
        command_buffer.add_completed_handler(&block);

        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        if self.presents_with_transaction {
            command_buffer.commit();
            command_buffer.wait_until_scheduled();
            drawable.present();
        } else {
            command_buffer.present_drawable(drawable);
            command_buffer.commit();
        }
    }
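
    // Paths are rendered in two passes: first each path is rasterized into an
    // R16Float atlas tile using additive blending to accumulate coverage (see
    // build_path_rasterization_pipeline_state), then draw_paths composites those
    // tiles onto the drawable as ordinary sprites.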
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > INSTANCE_BUFFER_SIZE {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder =
                command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let buffer_contents =
                unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }
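
    // Each draw_* method below follows the same pattern: align the instance
    // offset to 256 bytes, bind the unit-quad vertices plus the instance data at
    // that offset, copy the instances into the buffer, and issue a single
    // instanced draw of two triangles (six vertices). They return false when the
    // instance buffer would overflow.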
    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &mut self,
        quads: &[Quad],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        // Accumulate runs of sprites that share an atlas texture, flushing a draw
        // call whenever the texture changes or the paths are exhausted.
        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > INSTANCE_BUFFER_SIZE {
                    return false;
                }

                let buffer_contents =
                    unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }

    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }
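
    // Each surface arrives as a CoreVideo pixel buffer in biplanar YCbCr
    // (NV12-style) format: plane 0 holds full-resolution luma, sampled as
    // R8Unorm, and plane 1 holds interleaved, subsampled chroma, sampled as
    // RG8Unorm. The fragment shader is expected to convert these to RGB.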
    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

            align_offset(instance_offset);
            // Note: this reserves size_of::<Surface>() even though only a
            // SurfaceBounds is written below.
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > INSTANCE_BUFFER_SIZE {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            unsafe {
                let buffer_contents = (instance_buffer.contents() as *mut u8)
                    .add(*instance_offset) as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}
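
// The primitive pipelines blend with straight (non-premultiplied) alpha:
// rgb = src.rgb * src.a + dst.rgb * (1 - src.a), the classic source-over rule,
// while alpha accumulates additively.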
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    // Additive One/One blending lets overlapping triangles accumulate coverage.
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Aligning offsets to multiples of 256 keeps Metal happy: on macOS, buffer-binding
// offsets must be 256-byte aligned. For example, an offset of 300 rounds up to 512,
// and an offset of 256 stays 256.
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}