1use crate::{
2 point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
3 Hsla, MetalAtlas, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch,
4 Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline,
5};
6use block::ConcreteBlock;
7use cocoa::{
8 base::{NO, YES},
9 foundation::NSUInteger,
10 quartzcore::AutoresizingMask,
11};
12use collections::HashMap;
13use core_foundation::base::TCFType;
14use foreign_types::ForeignType;
15use media::core_video::CVMetalTextureCache;
16use metal::{CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
17use objc::{self, msg_send, sel, sel_impl};
18use parking_lot::Mutex;
19use smallvec::SmallVec;
20use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};
21
// Precompiled Metal shader library produced by the build script (default path).
#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
// Stitched shader source compiled at runtime when `runtime_shaders` is enabled.
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &'static str =
    include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Capacity in bytes of each pooled per-frame instance buffer.
const INSTANCE_BUFFER_SIZE: usize = 2 * 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value (maybe even we could adjust dynamically...)
28
/// Metal-backed renderer that draws a `Scene` into a `CAMetalLayer`.
///
/// Holds one render-pipeline state per primitive kind, a shared pool of
/// instance buffers used to stream per-frame primitive data to the GPU, and a
/// CoreVideo texture cache for drawing video surfaces.
pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    // Mirrors the layer's `presentsWithTransaction` setting; selects the
    // presentation path in `draw`.
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    // Off-screen pass that rasterizes path coverage into R16Float atlas tiles.
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    // On-screen pass that composites the rasterized path tiles.
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    // Six packed float2 vertices forming two triangles over the unit square;
    // instanced draws scale these into each primitive's bounds.
    unit_vertices: metal::Buffer,
    // Shared with completion handlers so buffers are recycled once the GPU is
    // done with them.
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}
48
49impl MetalRenderer {
    /// Creates a renderer on the system-default Metal device.
    ///
    /// `instance_buffer_pool` is shared so per-frame instance buffers can be
    /// recycled across frames (and renderers) instead of reallocated.
    ///
    /// Exits the process if no compatible Metal device is available.
    pub fn new(instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(true);
        // Allow up to three in-flight drawables.
        layer.set_maximum_drawable_count(3);
        unsafe {
            // SAFETY: plain Objective-C property setters invoked on a valid,
            // newly created CAMetalLayer.
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        // Shaders come either from runtime compilation of stitched source or
        // from the metallib baked in by the build script.
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        // Packs a point into the bit layout of a Metal `float2`: x in the low
        // 32 bits, y in the high 32 bits.
        fn to_float2_bits(point: crate::PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        // Two triangles covering the unit square; the vertex shaders scale
        // these into each instance's bounds.
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        // Paths rasterize into a single-channel float atlas; everything else
        // renders directly into the BGRA drawable.
        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() };

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }
191
192 pub fn layer(&self) -> &metal::MetalLayerRef {
193 &self.layer
194 }
195
196 pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
197 &self.sprite_atlas
198 }
199
200 pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
201 self.presents_with_transaction = presents_with_transaction;
202 self.layer
203 .set_presents_with_transaction(presents_with_transaction);
204 }
205
    /// Renders `scene` into the layer's next drawable.
    ///
    /// All per-primitive instance data for the frame is streamed into a
    /// single pooled instance buffer. If a batch would overflow that buffer,
    /// the remaining batches are skipped for this frame and an error is
    /// logged.
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        // Reuse a pooled buffer when possible; it is pushed back into the
        // pool by the completion handler registered below.
        // NOTE(review): on the early `return`s in this function the popped
        // buffer is dropped rather than recycled — confirm that's intended.
        let mut instance_buffer = self.instance_buffer_pool.lock().pop().unwrap_or_else(|| {
            self.device.new_buffer(
                INSTANCE_BUFFER_SIZE as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        // Byte offset of the next free slot in `instance_buffer`.
        let mut instance_offset = 0;

        // First pass: rasterize paths off-screen into atlas tiles; the main
        // pass below samples those tiles when drawing `PrimitiveBatch::Paths`.
        let Some(path_tiles) = self.rasterize_paths(
            scene.paths(),
            &mut instance_buffer,
            &mut instance_offset,
            command_buffer,
        ) else {
            log::error!("failed to rasterize {} paths", scene.paths().len());
            return;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        // Opaque layers clear to opaque black; otherwise clear fully
        // transparent.
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });
        // Batches arrive in draw order; each helper returns `false` when the
        // instance buffer ran out of space.
        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                log::error!("scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
                break;
            }
        }

        command_encoder.end_encoding();

        // Managed storage: tell Metal which byte range the CPU wrote so it
        // can be synchronized to the GPU.
        instance_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

        // Return the instance buffer to the pool only once the GPU has
        // finished executing this command buffer.
        let instance_buffer_pool = self.instance_buffer_pool.clone();
        let instance_buffer = Cell::new(Some(instance_buffer));
        let block = ConcreteBlock::new(move |_| {
            if let Some(instance_buffer) = instance_buffer.take() {
                instance_buffer_pool.lock().push(instance_buffer);
            }
        });
        let block = block.copy();
        command_buffer.add_completed_handler(&block);

        // Path tiles are re-rasterized every frame.
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        if self.presents_with_transaction {
            // Wait until the work is scheduled, then present within the
            // current Core Animation transaction.
            command_buffer.commit();
            command_buffer.wait_until_scheduled();
            drawable.present();
        } else {
            command_buffer.present_drawable(drawable);
            command_buffer.commit();
        }
    }
367
    /// Rasterizes `paths` into atlas tiles via off-screen render passes.
    ///
    /// Allocates one atlas tile per path, groups vertices by the atlas
    /// texture their tile landed in, copies them into `instance_buffer`
    /// (advancing `instance_offset`), and encodes one render pass per
    /// texture. Returns a map from path id to its tile, or `None` if the
    /// instance buffer ran out of space.
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            // Tile size covers only the visible (mask-clipped) portion of the
            // path.
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                // Translate vertices from window space into tile space within
                // the atlas texture.
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            // Metal requires 256-byte-aligned buffer offsets; bail before
            // encoding anything if this group doesn't fit.
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > INSTANCE_BUFFER_SIZE {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            // Copy this group's vertices into the bound region of the
            // instance buffer. The range was bounds-checked above.
            let buffer_contents =
                unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }
454
455 fn draw_shadows(
456 &mut self,
457 shadows: &[Shadow],
458 instance_buffer: &mut metal::Buffer,
459 instance_offset: &mut usize,
460 viewport_size: Size<DevicePixels>,
461 command_encoder: &metal::RenderCommandEncoderRef,
462 ) -> bool {
463 if shadows.is_empty() {
464 return true;
465 }
466 align_offset(instance_offset);
467
468 command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
469 command_encoder.set_vertex_buffer(
470 ShadowInputIndex::Vertices as u64,
471 Some(&self.unit_vertices),
472 0,
473 );
474 command_encoder.set_vertex_buffer(
475 ShadowInputIndex::Shadows as u64,
476 Some(instance_buffer),
477 *instance_offset as u64,
478 );
479 command_encoder.set_fragment_buffer(
480 ShadowInputIndex::Shadows as u64,
481 Some(instance_buffer),
482 *instance_offset as u64,
483 );
484
485 command_encoder.set_vertex_bytes(
486 ShadowInputIndex::ViewportSize as u64,
487 mem::size_of_val(&viewport_size) as u64,
488 &viewport_size as *const Size<DevicePixels> as *const _,
489 );
490
491 let shadow_bytes_len = mem::size_of_val(shadows);
492 let buffer_contents =
493 unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
494
495 let next_offset = *instance_offset + shadow_bytes_len;
496 if next_offset > INSTANCE_BUFFER_SIZE {
497 return false;
498 }
499
500 unsafe {
501 ptr::copy_nonoverlapping(
502 shadows.as_ptr() as *const u8,
503 buffer_contents,
504 shadow_bytes_len,
505 );
506 }
507
508 command_encoder.draw_primitives_instanced(
509 metal::MTLPrimitiveType::Triangle,
510 0,
511 6,
512 shadows.len() as u64,
513 );
514 *instance_offset = next_offset;
515 true
516 }
517
518 fn draw_quads(
519 &mut self,
520 quads: &[Quad],
521 instance_buffer: &mut metal::Buffer,
522 instance_offset: &mut usize,
523 viewport_size: Size<DevicePixels>,
524 command_encoder: &metal::RenderCommandEncoderRef,
525 ) -> bool {
526 if quads.is_empty() {
527 return true;
528 }
529 align_offset(instance_offset);
530
531 command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
532 command_encoder.set_vertex_buffer(
533 QuadInputIndex::Vertices as u64,
534 Some(&self.unit_vertices),
535 0,
536 );
537 command_encoder.set_vertex_buffer(
538 QuadInputIndex::Quads as u64,
539 Some(instance_buffer),
540 *instance_offset as u64,
541 );
542 command_encoder.set_fragment_buffer(
543 QuadInputIndex::Quads as u64,
544 Some(instance_buffer),
545 *instance_offset as u64,
546 );
547
548 command_encoder.set_vertex_bytes(
549 QuadInputIndex::ViewportSize as u64,
550 mem::size_of_val(&viewport_size) as u64,
551 &viewport_size as *const Size<DevicePixels> as *const _,
552 );
553
554 let quad_bytes_len = mem::size_of_val(quads);
555 let buffer_contents =
556 unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
557
558 let next_offset = *instance_offset + quad_bytes_len;
559 if next_offset > INSTANCE_BUFFER_SIZE {
560 return false;
561 }
562
563 unsafe {
564 ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
565 }
566
567 command_encoder.draw_primitives_instanced(
568 metal::MTLPrimitiveType::Triangle,
569 0,
570 6,
571 quads.len() as u64,
572 );
573 *instance_offset = next_offset;
574 true
575 }
576
    /// Draws previously rasterized paths by compositing their atlas tiles.
    ///
    /// Consecutive paths whose tiles share an atlas texture are coalesced
    /// into one instanced draw call. Returns `false` when the instance
    /// buffer ran out of space.
    ///
    /// Panics if a path in `paths` has no entry in `tiles_by_path_id`.
    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            // Accumulate sprites while consecutive paths share the same atlas
            // texture; flush one instanced draw whenever the texture changes
            // or the paths run out.
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                // Nothing pending: all paths have been drawn.
                break;
            } else {
                // Flush the accumulated batch for `prev_texture_id`.
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                // NOTE(review): capacity is checked after the buffers were
                // bound at the current offset; the caller stops drawing on
                // `false`, so the stale binding is presumably never used.
                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > INSTANCE_BUFFER_SIZE {
                    return false;
                }

                let buffer_contents =
                    unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }
685
686 fn draw_underlines(
687 &mut self,
688 underlines: &[Underline],
689 instance_buffer: &mut metal::Buffer,
690 instance_offset: &mut usize,
691 viewport_size: Size<DevicePixels>,
692 command_encoder: &metal::RenderCommandEncoderRef,
693 ) -> bool {
694 if underlines.is_empty() {
695 return true;
696 }
697 align_offset(instance_offset);
698
699 command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
700 command_encoder.set_vertex_buffer(
701 UnderlineInputIndex::Vertices as u64,
702 Some(&self.unit_vertices),
703 0,
704 );
705 command_encoder.set_vertex_buffer(
706 UnderlineInputIndex::Underlines as u64,
707 Some(instance_buffer),
708 *instance_offset as u64,
709 );
710 command_encoder.set_fragment_buffer(
711 UnderlineInputIndex::Underlines as u64,
712 Some(instance_buffer),
713 *instance_offset as u64,
714 );
715
716 command_encoder.set_vertex_bytes(
717 UnderlineInputIndex::ViewportSize as u64,
718 mem::size_of_val(&viewport_size) as u64,
719 &viewport_size as *const Size<DevicePixels> as *const _,
720 );
721
722 let underline_bytes_len = mem::size_of_val(underlines);
723 let buffer_contents =
724 unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
725
726 let next_offset = *instance_offset + underline_bytes_len;
727 if next_offset > INSTANCE_BUFFER_SIZE {
728 return false;
729 }
730
731 unsafe {
732 ptr::copy_nonoverlapping(
733 underlines.as_ptr() as *const u8,
734 buffer_contents,
735 underline_bytes_len,
736 );
737 }
738
739 command_encoder.draw_primitives_instanced(
740 metal::MTLPrimitiveType::Triangle,
741 0,
742 6,
743 underlines.len() as u64,
744 );
745 *instance_offset = next_offset;
746 true
747 }
748
749 fn draw_monochrome_sprites(
750 &mut self,
751 texture_id: AtlasTextureId,
752 sprites: &[MonochromeSprite],
753 instance_buffer: &mut metal::Buffer,
754 instance_offset: &mut usize,
755 viewport_size: Size<DevicePixels>,
756 command_encoder: &metal::RenderCommandEncoderRef,
757 ) -> bool {
758 if sprites.is_empty() {
759 return true;
760 }
761 align_offset(instance_offset);
762
763 let texture = self.sprite_atlas.metal_texture(texture_id);
764 let texture_size = size(
765 DevicePixels(texture.width() as i32),
766 DevicePixels(texture.height() as i32),
767 );
768 command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
769 command_encoder.set_vertex_buffer(
770 SpriteInputIndex::Vertices as u64,
771 Some(&self.unit_vertices),
772 0,
773 );
774 command_encoder.set_vertex_buffer(
775 SpriteInputIndex::Sprites as u64,
776 Some(instance_buffer),
777 *instance_offset as u64,
778 );
779 command_encoder.set_vertex_bytes(
780 SpriteInputIndex::ViewportSize as u64,
781 mem::size_of_val(&viewport_size) as u64,
782 &viewport_size as *const Size<DevicePixels> as *const _,
783 );
784 command_encoder.set_vertex_bytes(
785 SpriteInputIndex::AtlasTextureSize as u64,
786 mem::size_of_val(&texture_size) as u64,
787 &texture_size as *const Size<DevicePixels> as *const _,
788 );
789 command_encoder.set_fragment_buffer(
790 SpriteInputIndex::Sprites as u64,
791 Some(instance_buffer),
792 *instance_offset as u64,
793 );
794 command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
795
796 let sprite_bytes_len = mem::size_of_val(sprites);
797 let buffer_contents =
798 unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
799
800 let next_offset = *instance_offset + sprite_bytes_len;
801 if next_offset > INSTANCE_BUFFER_SIZE {
802 return false;
803 }
804
805 unsafe {
806 ptr::copy_nonoverlapping(
807 sprites.as_ptr() as *const u8,
808 buffer_contents,
809 sprite_bytes_len,
810 );
811 }
812
813 command_encoder.draw_primitives_instanced(
814 metal::MTLPrimitiveType::Triangle,
815 0,
816 6,
817 sprites.len() as u64,
818 );
819 *instance_offset = next_offset;
820 true
821 }
822
823 fn draw_polychrome_sprites(
824 &mut self,
825 texture_id: AtlasTextureId,
826 sprites: &[PolychromeSprite],
827 instance_buffer: &mut metal::Buffer,
828 instance_offset: &mut usize,
829 viewport_size: Size<DevicePixels>,
830 command_encoder: &metal::RenderCommandEncoderRef,
831 ) -> bool {
832 if sprites.is_empty() {
833 return true;
834 }
835 align_offset(instance_offset);
836
837 let texture = self.sprite_atlas.metal_texture(texture_id);
838 let texture_size = size(
839 DevicePixels(texture.width() as i32),
840 DevicePixels(texture.height() as i32),
841 );
842 command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
843 command_encoder.set_vertex_buffer(
844 SpriteInputIndex::Vertices as u64,
845 Some(&self.unit_vertices),
846 0,
847 );
848 command_encoder.set_vertex_buffer(
849 SpriteInputIndex::Sprites as u64,
850 Some(instance_buffer),
851 *instance_offset as u64,
852 );
853 command_encoder.set_vertex_bytes(
854 SpriteInputIndex::ViewportSize as u64,
855 mem::size_of_val(&viewport_size) as u64,
856 &viewport_size as *const Size<DevicePixels> as *const _,
857 );
858 command_encoder.set_vertex_bytes(
859 SpriteInputIndex::AtlasTextureSize as u64,
860 mem::size_of_val(&texture_size) as u64,
861 &texture_size as *const Size<DevicePixels> as *const _,
862 );
863 command_encoder.set_fragment_buffer(
864 SpriteInputIndex::Sprites as u64,
865 Some(instance_buffer),
866 *instance_offset as u64,
867 );
868 command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
869
870 let sprite_bytes_len = mem::size_of_val(sprites);
871 let buffer_contents =
872 unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
873
874 let next_offset = *instance_offset + sprite_bytes_len;
875 if next_offset > INSTANCE_BUFFER_SIZE {
876 return false;
877 }
878
879 unsafe {
880 ptr::copy_nonoverlapping(
881 sprites.as_ptr() as *const u8,
882 buffer_contents,
883 sprite_bytes_len,
884 );
885 }
886
887 command_encoder.draw_primitives_instanced(
888 metal::MTLPrimitiveType::Triangle,
889 0,
890 6,
891 sprites.len() as u64,
892 );
893 *instance_offset = next_offset;
894 true
895 }
896
    /// Draws video surfaces by wrapping each CVPixelBuffer's planes in Metal
    /// textures and sampling them as Y + CbCr in the surface shader.
    ///
    /// Returns `false` when the instance buffer ran out of space.
    ///
    /// Panics if a surface's pixel format is not bi-planar full-range
    /// 4:2:0 YCbCr, or if texture creation from the pixel buffer fails.
    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            // Only bi-planar full-range 4:2:0 is supported by the pipeline.
            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            // Wrap the luma (plane 0) and chroma (plane 1) planes in Metal
            // textures via the CoreVideo texture cache.
            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

            // NOTE(review): the offset advances by `size_of::<Surface>()`
            // but only a `SurfaceBounds` is written below — this over-reserves
            // if `Surface` is larger than `SurfaceBounds`; confirm intended.
            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > INSTANCE_BUFFER_SIZE {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            // Write this surface's bounds/mask into the bound region of the
            // instance buffer.
            unsafe {
                let buffer_contents = (instance_buffer.contents() as *mut u8).add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
995}
996
997fn build_pipeline_state(
998 device: &metal::DeviceRef,
999 library: &metal::LibraryRef,
1000 label: &str,
1001 vertex_fn_name: &str,
1002 fragment_fn_name: &str,
1003 pixel_format: metal::MTLPixelFormat,
1004) -> metal::RenderPipelineState {
1005 let vertex_fn = library
1006 .get_function(vertex_fn_name, None)
1007 .expect("error locating vertex function");
1008 let fragment_fn = library
1009 .get_function(fragment_fn_name, None)
1010 .expect("error locating fragment function");
1011
1012 let descriptor = metal::RenderPipelineDescriptor::new();
1013 descriptor.set_label(label);
1014 descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1015 descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1016 let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1017 color_attachment.set_pixel_format(pixel_format);
1018 color_attachment.set_blending_enabled(true);
1019 color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1020 color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1021 color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
1022 color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1023 color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1024 color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1025
1026 device
1027 .new_render_pipeline_state(&descriptor)
1028 .expect("could not create render pipeline state")
1029}
1030
1031fn build_path_rasterization_pipeline_state(
1032 device: &metal::DeviceRef,
1033 library: &metal::LibraryRef,
1034 label: &str,
1035 vertex_fn_name: &str,
1036 fragment_fn_name: &str,
1037 pixel_format: metal::MTLPixelFormat,
1038) -> metal::RenderPipelineState {
1039 let vertex_fn = library
1040 .get_function(vertex_fn_name, None)
1041 .expect("error locating vertex function");
1042 let fragment_fn = library
1043 .get_function(fragment_fn_name, None)
1044 .expect("error locating fragment function");
1045
1046 let descriptor = metal::RenderPipelineDescriptor::new();
1047 descriptor.set_label(label);
1048 descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1049 descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1050 let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1051 color_attachment.set_pixel_format(pixel_format);
1052 color_attachment.set_blending_enabled(true);
1053 color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1054 color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1055 color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
1056 color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1057 color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
1058 color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1059
1060 device
1061 .new_render_pipeline_state(&descriptor)
1062 .expect("could not create render pipeline state")
1063}
1064
/// Rounds `offset` up to the next multiple of 256 — the buffer-offset
/// alignment Metal is happy with.
fn align_offset(offset: &mut usize) {
    // 256 is a power of two: adding 255 then masking off the low eight bits
    // rounds up to the next multiple.
    *offset = (*offset + 255) & !255;
}
1069
// Buffer indices used when encoding the shadow pipeline (see `draw_shadows`).
#[repr(C)]
enum ShadowInputIndex {
    // Unit-quad vertices shared by all instances.
    Vertices = 0,
    // Per-instance shadow data.
    Shadows = 1,
    ViewportSize = 2,
}
1076
// Buffer indices used when encoding the quad pipeline (see `draw_quads`).
#[repr(C)]
enum QuadInputIndex {
    // Unit-quad vertices shared by all instances.
    Vertices = 0,
    // Per-instance quad data.
    Quads = 1,
    ViewportSize = 2,
}
1083
// Buffer indices used when encoding the underline pipeline (see
// `draw_underlines`).
#[repr(C)]
enum UnderlineInputIndex {
    // Unit-quad vertices shared by all instances.
    Vertices = 0,
    // Per-instance underline data.
    Underlines = 1,
    ViewportSize = 2,
}
1090
// Buffer/texture indices shared by the sprite pipelines: path sprites,
// monochrome sprites, and polychrome sprites.
#[repr(C)]
enum SpriteInputIndex {
    // Unit-quad vertices shared by all instances.
    Vertices = 0,
    // Per-instance sprite data.
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    // Fragment texture index of the atlas being sampled.
    AtlasTexture = 4,
}
1099
// Buffer/texture indices used when encoding the surface pipeline (see
// `draw_surfaces`).
#[repr(C)]
enum SurfaceInputIndex {
    // Unit-quad vertices shared by all instances.
    Vertices = 0,
    // Per-surface bounds data.
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    // Fragment texture index for the luma plane.
    YTexture = 4,
    // Fragment texture index for the chroma plane.
    CbCrTexture = 5,
}
1109
// Buffer indices used when encoding the off-screen path-rasterization pass
// (see `rasterize_paths`).
#[repr(C)]
enum PathRasterizationInputIndex {
    // Path vertices, already translated into atlas-tile space.
    Vertices = 0,
    AtlasTextureSize = 1,
}
1115
/// Per-instance data for compositing one rasterized path's atlas tile to the
/// screen; copied raw into the instance buffer, so the layout (`repr(C)`)
/// must match the shader-side definition.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    // Screen-space destination of the tile.
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    // Atlas tile holding the path's rasterized coverage.
    pub tile: AtlasTile,
}
1123
/// Per-instance data written into the instance buffer for each video surface;
/// `repr(C)` because it is consumed directly by the surface shader.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}