use crate::{
    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
    Hsla, MetalAtlas, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch,
    Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline,
};
use cocoa::{
    base::{NO, YES},
    foundation::NSUInteger,
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use foreign_types::ForeignType;
use media::core_video::CVMetalTextureCache;
use metal::{CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use smallvec::SmallVec;
use std::{ffi::c_void, mem, ptr, sync::Arc};

const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
const INSTANCE_BUFFER_SIZE: usize = 8192 * 1024; // This is an arbitrary decision. There's probably a more optimal value.

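/// macOS Metal renderer for GPUI scenes. Holds the CAMetalLayer the window
/// presents into, one render pipeline state per primitive kind, a single
/// shared instance buffer that the draw calls sub-allocate from at aligned
/// offsets, and the sprite atlas used by the sprite and path batches.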
pub(crate) struct MetalRenderer {
    layer: metal::MetalLayer,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    instances: metal::Buffer,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}

impl MetalRenderer {
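    /// Creates the renderer: takes the system default GPU, configures the
    /// layer it will present into, loads the embedded `shaders.metallib`, and
    /// builds one pipeline state per primitive kind plus the shared unit
    /// vertex and instance buffers.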
    pub fn new(is_opaque: bool) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_presents_with_transaction(true);
        layer.set_opaque(is_opaque);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }

        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        fn to_float2_bits(point: crate::PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

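        // A unit quad expressed as two triangles. Each vertex packs an (x, y)
        // pair of f32 bits into a u64 to match the shader's float2 layout;
        // the instanced draws below scale and position copies of this quad.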
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<u64>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));

        Self {
            layer,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instances,
            sprite_atlas,
            core_video_texture_cache: unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() },
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

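    /// Renders a frame: first rasterizes paths into atlas tiles in offscreen
    /// passes, then encodes every batch in the scene into one render pass on
    /// the layer's next drawable, and presents once the GPU has finished.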
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let path_tiles = self.rasterize_paths(scene.paths(), &mut instance_offset, command_buffer);

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });
        for batch in scene.batches() {
            match batch {
                PrimitiveBatch::Shadows(shadows) => {
                    self.draw_shadows(
                        shadows,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Quads(quads) => {
                    self.draw_quads(quads, &mut instance_offset, viewport_size, command_encoder);
                }
                PrimitiveBatch::Paths(paths) => {
                    self.draw_paths(
                        paths,
                        &path_tiles,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Underlines(underlines) => {
                    self.draw_underlines(
                        underlines,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => {
                    self.draw_monochrome_sprites(
                        texture_id,
                        sprites,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => {
                    self.draw_polychrome_sprites(
                        texture_id,
                        sprites,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Surfaces(surfaces) => {
                    self.draw_surfaces(
                        surfaces,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
            }
        }

        command_encoder.end_encoding();

        self.instances.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

        command_buffer.commit();
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        command_buffer.wait_until_completed();
        drawable.present();
    }

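    /// Rasterizes each path into an R16Float atlas tile, batching all paths
    /// that land in the same atlas texture into a single offscreen pass.
    /// Returns the tile chosen for every `PathId` so `draw_paths` can later
    /// composite the tiles into the frame.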
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> HashMap<PathId, AtlasTile> {
        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(offset);
            let next_offset = *offset + vertices.len() * mem::size_of::<PathVertex<ScaledPixels>>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&self.instances),
                *offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let vertices_bytes_len = mem::size_of::<PathVertex<ScaledPixels>>() * vertices.len();
            let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *offset = next_offset;
        }

        tiles
    }

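    /// Encodes an instanced draw for a batch of shadows. This method, like
    /// the other `draw_*` methods below, follows one pattern: align the
    /// shared instance-buffer offset, bind the instance slice to the vertex
    /// and fragment stages, copy the instance data in, and draw the 6-vertex
    /// unit quad once per instance.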
    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if shadows.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        // Check capacity before writing so we can't overrun the instance buffer.
        let shadow_bytes_len = mem::size_of_val(shadows);
        let next_offset = *offset + shadow_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *offset = next_offset;
    }

    fn draw_quads(
        &mut self,
        quads: &[Quad],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if quads.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let next_offset = *offset + quad_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *offset = next_offset;
    }

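    /// Composites previously rasterized path tiles into the frame as sprites.
    /// Consecutive paths whose tiles share an atlas texture are accumulated
    /// and flushed as one instanced draw, preserving the scene's paint order.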
    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if paths.is_empty() {
            return;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&self.instances),
                    *offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&self.instances),
                    *offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                // The instances here are `PathSprite`s, so size the copy accordingly.
                let sprite_bytes_len = mem::size_of::<PathSprite>() * sprites.len();
                let next_offset = *offset + sprite_bytes_len;
                assert!(
                    next_offset <= INSTANCE_BUFFER_SIZE,
                    "instance buffer exhausted"
                );

                let buffer_contents =
                    unsafe { (self.instances.contents() as *mut u8).add(*offset) };
                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *offset = next_offset;
                sprites.clear();
            }
        }
    }

    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if underlines.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let next_offset = *offset + underline_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *offset = next_offset;
    }

    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if sprites.is_empty() {
            return;
        }
        align_offset(offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let next_offset = *offset + sprite_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *offset = next_offset;
    }

    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if sprites.is_empty() {
            return;
        }
        align_offset(offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let next_offset = *offset + sprite_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *offset = next_offset;
    }

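    /// Draws surfaces backed by biplanar YCbCr CVPixelBuffers. The luminance
    /// and chrominance planes are wrapped as Metal textures via the CoreVideo
    /// texture cache, and each surface is encoded as its own unit-quad draw.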
    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

            align_offset(offset);
            // Reserve space for the `SurfaceBounds` we actually write below.
            let next_offset = *offset + mem::size_of::<SurfaceBounds>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&self.instances),
                *offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            unsafe {
                let buffer_contents =
                    (self.instances.contents() as *mut u8).add(*offset) as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *offset = next_offset;
        }
    }
}

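/// Builds a render pipeline for on-screen primitives. Color is blended
/// source-over (source alpha vs. one minus source alpha), while alpha itself
/// accumulates additively in the destination.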
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

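/// Builds the offscreen pipeline for rasterizing paths into the atlas. All
/// blend factors are One, so fragment contributions accumulate additively in
/// the R16Float coverage target rather than compositing over each other.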
fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Metal requires buffer offsets bound into the argument table to be aligned
// to 256 bytes on macOS, so round the offset up to the next multiple of 256.
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}

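// Argument-table indices shared with the Metal shader source. Each variant's
// value must match the corresponding [[buffer(n)]] / [[texture(n)]] binding
// in the shader functions named when the pipelines are built above.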
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}