use crate::{
    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
    Hsla, MetalAtlas, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch,
    Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline,
};
use cocoa::{
    base::{NO, YES},
    foundation::NSUInteger,
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use foreign_types::ForeignType;
use media::core_video::CVMetalTextureCache;
use metal::{CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use smallvec::SmallVec;
use std::{ffi::c_void, mem, ptr, sync::Arc};

const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
const INSTANCE_BUFFER_SIZE: usize = 8192 * 1024; // This is an arbitrary decision. There's probably a more optimal value.

pub(crate) struct MetalRenderer {
    layer: metal::MetalLayer,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    instances: metal::Buffer,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}

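// Frame flow: `draw` first rasterizes any paths into R16Float atlas tiles,
// then replays the scene's primitive batches in order, packing each batch's
// instance data into `instances` at 256-byte-aligned offsets.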
impl MetalRenderer {
    pub fn new(is_opaque: bool) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_presents_with_transaction(true);
        layer.set_opaque(is_opaque);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }

        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        fn to_float2_bits(point: crate::PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

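        // Two triangles covering the unit square, one packed `float2` per
        // entry; e.g. point(1., 0.) packs as 0x0000_0000_3f80_0000 (y in the
        // high 32 bits, x in the low 32 bits).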
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<u64>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
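        // One shared buffer holds all per-frame instance data (shadows, quads,
        // sprites, underlines, path vertices); each draw binds it at a
        // 256-byte-aligned offset.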
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

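        // One pipeline per primitive kind. Path rasterization is the odd one
        // out: it renders into the R16Float atlas rather than the layer's
        // BGRA8Unorm drawable.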
        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));

        Self {
            layer,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instances,
            sprite_atlas,
            core_video_texture_cache: CVMetalTextureCache::new(device.as_ptr()).unwrap(),
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &*self.layer
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let path_tiles = self.rasterize_paths(scene.paths(), &mut instance_offset, &command_buffer);

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });
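        // Primitive batches arrive in paint order; each draw_* method copies
        // its instances into the shared buffer and advances `instance_offset`.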
        for batch in scene.batches() {
            match batch {
                PrimitiveBatch::Shadows(shadows) => {
                    self.draw_shadows(
                        shadows,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Quads(quads) => {
                    self.draw_quads(quads, &mut instance_offset, viewport_size, command_encoder);
                }
                PrimitiveBatch::Paths(paths) => {
                    self.draw_paths(
                        paths,
                        &path_tiles,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Underlines(underlines) => {
                    self.draw_underlines(
                        underlines,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => {
                    self.draw_monochrome_sprites(
                        texture_id,
                        sprites,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => {
                    self.draw_polychrome_sprites(
                        texture_id,
                        sprites,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
                PrimitiveBatch::Surfaces(surfaces) => {
                    self.draw_surfaces(
                        surfaces,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    );
                }
            }
        }

        command_encoder.end_encoding();

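        // `instances` uses managed storage, so tell Metal which byte range the
        // CPU wrote before committing the command buffer.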
        self.instances.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

        command_buffer.commit();
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);
        command_buffer.wait_until_completed();
        drawable.present();
    }

    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> HashMap<PathId, AtlasTile> {
        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - path.bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

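        // One rasterization pass per atlas texture: copy that texture's
        // vertices into the instance buffer and draw them in a single call.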
        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(offset);
            let next_offset = *offset + vertices.len() * mem::size_of::<PathVertex<ScaledPixels>>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&self.instances),
                *offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let vertices_bytes_len = mem::size_of::<PathVertex<ScaledPixels>>() * vertices.len();
            let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *offset = next_offset;
        }

        tiles
    }

    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if shadows.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of::<Shadow>() * shadows.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        let next_offset = *offset + shadow_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *offset = next_offset;
    }

    fn draw_quads(
        &mut self,
        quads: &[Quad],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if quads.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of::<Quad>() * quads.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        let next_offset = *offset + quad_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *offset = next_offset;
    }

    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if paths.is_empty() {
            return;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

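        // Batch consecutive paths that share an atlas texture into a single
        // instanced draw; flush whenever the texture changes or the input is
        // exhausted.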
        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: path.bounds.origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&self.instances),
                    *offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&self.instances),
                    *offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of::<PathSprite>() * sprites.len();
                let buffer_contents =
                    unsafe { (self.instances.contents() as *mut u8).add(*offset) };
                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                let next_offset = *offset + sprite_bytes_len;
                assert!(
                    next_offset <= INSTANCE_BUFFER_SIZE,
                    "instance buffer exhausted"
                );

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *offset = next_offset;
                sprites.clear();
            }
        }
    }

    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if underlines.is_empty() {
            return;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of::<Underline>() * underlines.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        let next_offset = *offset + underline_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *offset = next_offset;
    }

    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if sprites.is_empty() {
            return;
        }
        align_offset(offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of::<MonochromeSprite>() * sprites.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        let next_offset = *offset + sprite_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *offset = next_offset;
    }

    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        if sprites.is_empty() {
            return;
        }
        align_offset(offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of::<PolychromeSprite>() * sprites.len();
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        let next_offset = *offset + sprite_bytes_len;
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *offset = next_offset;
    }

    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

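            // Wrap each plane of the biplanar YCbCr image in a Metal texture:
            // plane 0 is luma (R8Unorm), plane 1 is interleaved chroma
            // (RG8Unorm).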
            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    ptr::null(),
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.plane_width(0),
                    surface.image_buffer.plane_height(0),
                    0,
                )
                .unwrap();
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    ptr::null(),
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.plane_width(1),
                    surface.image_buffer.plane_height(1),
                    1,
                )
                .unwrap();

            align_offset(offset);
            let next_offset = *offset + mem::size_of::<SurfaceBounds>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&self.instances),
                *offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            unsafe {
                let buffer_contents =
                    (self.instances.contents() as *mut u8).add(*offset) as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *offset = next_offset;
        }
    }
}

fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

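// Identical to `build_pipeline_state` except for the blend factors: path
// rasterization blends purely additively (all factors One), so overlapping
// triangle contributions accumulate in the R16Float target.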
fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Aligning offsets to multiples of 256 keeps Metal happy.
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}
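
// Sanity checks for the 256-byte rounding above; pure integer math, so it can
// be exercised without a Metal device.
#[cfg(test)]
mod tests {
    use super::align_offset;

    #[test]
    fn align_offset_rounds_up_to_multiples_of_256() {
        let mut offset = 0;
        align_offset(&mut offset);
        assert_eq!(offset, 0); // already aligned

        let mut offset = 1;
        align_offset(&mut offset);
        assert_eq!(offset, 256); // rounds up to the next multiple

        let mut offset = 256;
        align_offset(&mut offset);
        assert_eq!(offset, 256); // exact multiples are unchanged

        let mut offset = 257;
        align_offset(&mut offset);
        assert_eq!(offset, 512);
    }
}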

#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}