use crate::{
    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
    Hsla, MetalAtlas, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch,
    Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline,
};
use cocoa::{
    base::{NO, YES},
    foundation::NSUInteger,
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use foreign_types::ForeignType;
use media::core_video::CVMetalTextureCache;
use metal::{CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use smallvec::SmallVec;
use std::{ffi::c_void, mem, ptr, sync::Arc};

const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
const INSTANCE_BUFFER_SIZE: usize = 8192 * 1024; // This is an arbitrary decision. There's probably a better value.

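/// macOS renderer: encodes each frame's primitive batches as Metal draw calls
/// targeting a CAMetalLayer drawable, staging all per-instance data in a
/// single shared `instances` buffer.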
pub(crate) struct MetalRenderer {
    layer: metal::MetalLayer,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    instances: metal::Buffer,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}

impl MetalRenderer {
    pub fn new(is_opaque: bool) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_presents_with_transaction(true);
        layer.set_opaque(is_opaque);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }

        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        // Pack a point's two f32 coordinates into a u64 with the same bit
        // layout as a Metal float2: x in the low 32 bits, y in the high 32.
        // `f32::to_bits` already yields the raw bits, so no unsafe is needed.
        fn to_float2_bits(point: crate::PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<u64>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));

        Self {
            layer,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instances,
            sprite_atlas,
            core_video_texture_cache: CVMetalTextureCache::new(device.as_ptr()).unwrap(),
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &*self.layer
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

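    /// Encodes and submits one frame: rasterizes any paths into the path
    /// atlas, then draws each primitive batch into the next layer drawable.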
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let path_tiles = self.rasterize_paths(scene.paths(), &mut instance_offset, &command_buffer);

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });
        for batch in scene.batches() {
            match batch {
                PrimitiveBatch::Shadows(shadows) => {
                    self.draw_shadows(
                        shadows,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Quads(quads) => {
                    self.draw_quads(quads, &mut instance_offset, viewport_size, command_encoder);
                }
                PrimitiveBatch::Paths(paths) => {
                    self.draw_paths(
                        paths,
                        &path_tiles,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Underlines(underlines) => {
                    self.draw_underlines(
                        underlines,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => {
                    self.draw_monochrome_sprites(
                        texture_id,
                        sprites,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => {
                    self.draw_polychrome_sprites(
                        texture_id,
                        sprites,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Surfaces(surfaces) => {
                    self.draw_surfaces(
                        surfaces,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
            }
        }

        command_encoder.end_encoding();

        self.instances.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

        command_buffer.commit();
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);
        command_buffer.wait_until_completed();
        drawable.present();
    }

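    /// Rasterizes each path's coverage into a tile of the path atlas and
    /// returns the tile assigned to each `PathId`. Vertices are grouped by
    /// atlas texture so that each texture is filled in a single render pass.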
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> HashMap<PathId, AtlasTile> {
        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(offset);
            let next_offset = *offset + vertices.len() * mem::size_of::<PathVertex<ScaledPixels>>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&self.instances),
                *offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let vertices_bytes_len = mem::size_of::<PathVertex<ScaledPixels>>() * vertices.len();
            let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *offset = next_offset;
        }

        tiles
    }

    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if shadows.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of::<Shadow>() * shadows.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        let next_offset = *offset + shadow_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *offset = next_offset;
    }

    fn draw_quads(
        &mut self,
        quads: &[Quad],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if quads.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of::<Quad>() * quads.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        let next_offset = *offset + quad_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *offset = next_offset;
    }

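    /// Draws previously rasterized paths as sprites sampled from the path
    /// atlas, batching consecutive paths that share an atlas texture into one
    /// instanced draw call.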
    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if paths.is_empty() {
            return;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&self.instances),
                    *offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&self.instances),
                    *offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                // The batch holds `PathSprite`s, so size the copy by that type.
                let sprite_bytes_len = mem::size_of::<PathSprite>() * sprites.len();
                let buffer_contents =
                    unsafe { (self.instances.contents() as *mut u8).add(*offset) };
                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                let next_offset = *offset + sprite_bytes_len;
                assert!(
                    next_offset <= INSTANCE_BUFFER_SIZE,
                    "instance buffer exhausted"
                );

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *offset = next_offset;
                sprites.clear();
            }
        }
    }

    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if underlines.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of::<Underline>() * underlines.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        let next_offset = *offset + underline_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *offset = next_offset;
    }

    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if sprites.is_empty() {
            return;
        }
        align_offset(offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of::<MonochromeSprite>() * sprites.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        let next_offset = *offset + sprite_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *offset = next_offset;
    }

    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if sprites.is_empty() {
            return;
        }
        align_offset(offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of::<PolychromeSprite>() * sprites.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        let next_offset = *offset + sprite_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *offset = next_offset;
    }

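    /// Draws video surfaces by binding the luminance (Y) and chrominance
    /// (CbCr) planes of each biplanar pixel buffer as separate fragment
    /// textures.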
    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    ptr::null(),
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.plane_width(0),
                    surface.image_buffer.plane_height(0),
                    0,
                )
                .unwrap();
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    ptr::null(),
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.plane_width(1),
                    surface.image_buffer.plane_height(1),
                    1,
                )
                .unwrap();

            align_offset(offset);
            // Only a `SurfaceBounds` is written below, so reserve that size.
            let next_offset = *offset + mem::size_of::<SurfaceBounds>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&self.instances),
                *offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            unsafe {
                let buffer_contents =
                    (self.instances.contents() as *mut u8).add(*offset) as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *offset = next_offset;
        }
    }
}

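/// Builds a render pipeline for on-screen drawing, with alpha blending
/// configured for source-over compositing.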
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

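/// Identical to `build_pipeline_state` except that every blend factor is
/// `One`, so fragments accumulate additively; path rasterization relies on
/// this when summing coverage into the R16Float atlas texture.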
fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Align to multiples of 256 to satisfy Metal's buffer-offset alignment requirements.
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}

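// The discriminants below are buffer/texture argument-table indices and must
// match the corresponding declarations in the shader source compiled into
// `shaders.metallib`.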
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}
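
// A minimal sanity check of the 256-byte rounding in `align_offset`; a sketch
// assuming this file is compiled with the crate's usual test harness.
#[cfg(test)]
mod tests {
    use super::align_offset;

    #[test]
    fn align_offset_rounds_up_to_multiples_of_256() {
        let mut offset = 0;
        align_offset(&mut offset);
        assert_eq!(offset, 0); // zero is already aligned

        let mut offset = 1;
        align_offset(&mut offset);
        assert_eq!(offset, 256); // anything in 1..=256 rounds up to 256

        let mut offset = 256;
        align_offset(&mut offset);
        assert_eq!(offset, 256); // exact multiples are left untouched

        let mut offset = 257;
        align_offset(&mut offset);
        assert_eq!(offset, 512);
    }
}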