use crate::{
    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
    Hsla, MetalAtlas, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch,
    Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline,
};
use cocoa::{
    base::{NO, YES},
    foundation::NSUInteger,
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use foreign_types::ForeignType;
use media::core_video::CVMetalTextureCache;
use metal::{CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use smallvec::SmallVec;
use std::{ffi::c_void, mem, ptr, sync::Arc};

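/// The compiled Metal shader library, produced by the build script and
/// embedded in the binary at compile time.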
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
const INSTANCE_BUFFER_SIZE: usize = 8192 * 1024; // Chosen arbitrarily; a more carefully tuned value likely exists.

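/// macOS renderer that draws a `Scene` onto a `CAMetalLayer`. Each primitive
/// kind has its own render pipeline, and all per-frame instance data is written
/// into a single shared `instances` buffer at 256-byte-aligned offsets.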
pub(crate) struct MetalRenderer {
    layer: metal::MetalLayer,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    instances: metal::Buffer,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}

impl MetalRenderer {
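    /// Creates the layer, builds one render pipeline per primitive kind from
    /// the embedded shader library, and allocates the shared unit-vertex and
    /// instance buffers. Exits the process if no compatible Metal device is
    /// available.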
    pub fn new(is_opaque: bool) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_presents_with_transaction(true);
        layer.set_opaque(is_opaque);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }

        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        // Pack a point into the bit layout of a Metal `float2`: x in the low
        // 32 bits, y in the high 32 bits. `to_bits` already yields the raw
        // `u32` representation, so no `unsafe` transmute is needed.
        fn to_float2_bits(point: crate::PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<u64>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));

        Self {
            layer,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instances,
            sprite_atlas,
            core_video_texture_cache: CVMetalTextureCache::new(device.as_ptr()).unwrap(),
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &*self.layer
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

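    /// Draws a frame: first rasterizes any paths into atlas tiles, then encodes
    /// every batch of the scene, in paint order, into a single render pass on
    /// the next drawable. The command buffer is committed synchronously and the
    /// drawable presented once the GPU has finished.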
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let path_tiles = self.rasterize_paths(scene.paths(), &mut instance_offset, &command_buffer);

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });
        for batch in scene.batches() {
            match batch {
                PrimitiveBatch::Shadows(shadows) => {
                    self.draw_shadows(
                        shadows,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Quads(quads) => {
                    self.draw_quads(quads, &mut instance_offset, viewport_size, command_encoder);
                }
                PrimitiveBatch::Paths(paths) => {
                    self.draw_paths(
                        paths,
                        &path_tiles,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Underlines(underlines) => {
                    self.draw_underlines(
                        underlines,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => {
                    self.draw_monochrome_sprites(
                        texture_id,
                        sprites,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => {
                    self.draw_polychrome_sprites(
                        texture_id,
                        sprites,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Surfaces(surfaces) => {
                    self.draw_surfaces(
                        surfaces,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
            }
        }

        command_encoder.end_encoding();

        self.instances.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

        command_buffer.commit();
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        command_buffer.wait_until_completed();
        drawable.present();
    }

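    /// Rasterizes each path into a tile of an R16Float path atlas texture,
    /// staging the path vertices in the shared instance buffer and encoding one
    /// render pass per atlas texture. Returns the tile assigned to each
    /// `PathId` so that `draw_paths` can composite the tiles afterwards.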
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> HashMap<PathId, AtlasTile> {
        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(offset);
            let next_offset = *offset + vertices.len() * mem::size_of::<PathVertex<ScaledPixels>>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&self.instances),
                *offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let vertices_bytes_len = mem::size_of::<PathVertex<ScaledPixels>>() * vertices.len();
            let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *offset = next_offset;
        }

        tiles
    }

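    /// Draws a batch of shadows. Like the other `draw_*` helpers below, it
    /// aligns the instance offset, binds the unit-quad vertices and the shared
    /// instance buffer, copies the instance data into place, and issues a
    /// single instanced draw of six vertices (two triangles) per primitive.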
    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if shadows.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of::<Shadow>() * shadows.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        let next_offset = *offset + shadow_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *offset = next_offset;
    }

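    /// Draws a batch of quads using the instanced-draw pattern described on
    /// `draw_shadows`.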
    fn draw_quads(
        &mut self,
        quads: &[Quad],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if quads.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of::<Quad>() * quads.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        let next_offset = *offset + quad_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *offset = next_offset;
    }

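    /// Composites previously rasterized path tiles onto the drawable,
    /// coalescing consecutive paths that share an atlas texture into a single
    /// instanced draw.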
    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if paths.is_empty() {
            return;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&self.instances),
                    *offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&self.instances),
                    *offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of::<PathSprite>() * sprites.len();
                let buffer_contents =
                    unsafe { (self.instances.contents() as *mut u8).add(*offset) };
                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                let next_offset = *offset + sprite_bytes_len;
                assert!(
                    next_offset <= INSTANCE_BUFFER_SIZE,
                    "instance buffer exhausted"
                );

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *offset = next_offset;
                sprites.clear();
            }
        }
    }

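    /// Draws a batch of underlines using the instanced-draw pattern described
    /// on `draw_shadows`.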
    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if underlines.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of::<Underline>() * underlines.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        let next_offset = *offset + underline_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *offset = next_offset;
    }

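    /// Draws a batch of monochrome sprites, sampling each sprite's mask from
    /// the given atlas texture and tinting it with the sprite's color.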
    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if sprites.is_empty() {
            return;
        }
        align_offset(offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of::<MonochromeSprite>() * sprites.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        let next_offset = *offset + sprite_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *offset = next_offset;
    }

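    /// Draws a batch of polychrome (full-color) sprites from the given atlas
    /// texture.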
    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if sprites.is_empty() {
            return;
        }
        align_offset(offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of::<PolychromeSprite>() * sprites.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        let next_offset = *offset + sprite_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *offset = next_offset;
    }

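    /// Draws video surfaces. Each surface is a biplanar YCbCr `CVImageBuffer`
    /// whose luminance and chrominance planes are wrapped as Metal textures
    /// through the Core Video texture cache and bound for the fragment shader
    /// to combine.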
    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    ptr::null(),
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.plane_width(0),
                    surface.image_buffer.plane_height(0),
                    0,
                )
                .unwrap();
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    ptr::null(),
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.plane_width(1),
                    surface.image_buffer.plane_height(1),
                    1,
                )
                .unwrap();

            align_offset(offset);
            // Only a `SurfaceBounds` is written into the instance buffer below,
            // so advance the offset by its size.
            let next_offset = *offset + mem::size_of::<SurfaceBounds>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&self.instances),
                *offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            unsafe {
                let buffer_contents =
                    (self.instances.contents() as *mut u8).add(*offset) as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *offset = next_offset;
        }
    }
}

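/// Builds a render pipeline that blends over the existing target contents:
/// color uses source-over blending (`src * srcAlpha + dst * (1 - srcAlpha)`),
/// while alpha accumulates additively.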
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

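/// Builds the pipeline used to rasterize paths into the atlas. Every blend
/// factor is `One`, so overlapping triangles accumulate additively in the
/// R16Float target instead of overwriting one another.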
fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

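// Metal validates that buffer offsets passed to `set_vertex_buffer` and
// `set_fragment_buffer` are 256-byte aligned, e.g. an offset of 1 rounds up
// to 256 while 256 is left unchanged.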
// Align to multiples of 256 to make Metal happy.
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}

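// These discriminants are the argument-table indices used when binding buffers
// and textures above; they must stay in sync with the corresponding indices in
// the Metal shader source.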
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}