1use crate::{
2 point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
3 Hsla, MetalAtlas, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch,
4 Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline,
5};
6use block::ConcreteBlock;
7use cocoa::{
8 base::{NO, YES},
9 foundation::NSUInteger,
10 quartzcore::AutoresizingMask,
11};
12use collections::HashMap;
13use core_foundation::base::TCFType;
14use foreign_types::ForeignType;
15use media::core_video::CVMetalTextureCache;
16use metal::{CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
17use objc::{self, msg_send, sel, sel_impl};
18use parking_lot::Mutex;
19use smallvec::SmallVec;
20use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};
21
/// Precompiled Metal shader library, embedded into the binary at build time.
#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
/// Shader source text, compiled at startup when the `runtime_shaders` feature is enabled.
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &'static str =
    include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
/// Capacity of each pooled instance buffer; draw methods return `false` when a
/// frame's instance data would exceed it.
const INSTANCE_BUFFER_SIZE: usize = 32 * 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value (maybe even we could adjust dynamically...)
28
/// macOS renderer that draws a `Scene` into a `CAMetalLayer` using Metal.
pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    command_queue: CommandQueue,
    // One pipeline state per primitive kind. All of them target BGRA8Unorm
    // except path rasterization, which renders into an R16Float atlas texture.
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    // Six vertices (two triangles) covering the unit square, shared by all
    // instanced draw calls.
    unit_vertices: metal::Buffer,
    // Pool of instance buffers recycled across frames; `draw` pops one and the
    // command buffer's completion handler pushes it back.
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffers: Arc<Mutex<Vec<metal::Buffer>>>,
    sprite_atlas: Arc<MetalAtlas>,
    // Wraps CVPixelBuffer planes as Metal textures when drawing surfaces.
    core_video_texture_cache: CVMetalTextureCache,
}
47
48impl MetalRenderer {
    /// Creates a renderer backed by the system-default Metal device.
    ///
    /// `is_opaque` controls whether the backing `CAMetalLayer` is opaque.
    /// Exits the process if no compatible graphics device is available.
    pub fn new(is_opaque: bool) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            // Without a GPU we cannot render at all, so bail out of the app.
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_presents_with_transaction(true);
        layer.set_opaque(is_opaque);
        unsafe {
            // These layer properties aren't exposed by the `metal` crate, so
            // set them via raw Objective-C messages.
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        // Shaders either come precompiled from the build script or are
        // compiled from source at startup, depending on the feature flag.
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        // Packs a point into the bit layout of a Metal `float2`: x in the low
        // 32 bits, y in the high 32 bits.
        fn to_float2_bits(point: crate::PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        // Two triangles covering the unit square; each instanced draw emits 6
        // vertices per instance from this shared buffer.
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        // Path rasterization renders coverage into an R16Float atlas; every
        // other pipeline draws directly into the BGRA8 drawable.
        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        // SAFETY-adjacent: passes the raw MTLDevice pointer to CoreVideo;
        // panics if the texture cache cannot be created.
        let core_video_texture_cache =
            unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() };

        Self {
            device,
            layer,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffers: Arc::default(),
            sprite_atlas,
            core_video_texture_cache,
        }
    }
189
190 pub fn layer(&self) -> &metal::MetalLayerRef {
191 &self.layer
192 }
193
    /// Returns the atlas used to cache sprite glyphs and rasterized path tiles.
    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }
197
    /// Renders a full `Scene` frame into the layer's next drawable.
    ///
    /// Paths are first rasterized into atlas textures, then every primitive
    /// batch is encoded into a single render pass over the drawable.
    ///
    /// # Panics
    /// Panics if the paths cannot be rasterized or if a batch's instance data
    /// does not fit in `INSTANCE_BUFFER_SIZE`.
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            // Skip the frame entirely rather than block on a drawable.
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        // Reuse a pooled instance buffer if one is available; otherwise
        // allocate a fresh one of the fixed pool size.
        let mut instance_buffer = self.instance_buffers.lock().pop().unwrap_or_else(|| {
            self.device.new_buffer(
                INSTANCE_BUFFER_SIZE as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        // Byte offset of the next free region in `instance_buffer`; shared by
        // path rasterization and all primitive batches in this frame.
        let mut instance_offset = 0;

        let Some(path_tiles) = self.rasterize_paths(
            scene.paths(),
            &mut instance_buffer,
            &mut instance_offset,
            command_buffer,
        ) else {
            panic!("failed to rasterize {} paths", scene.paths().len());
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        // Opaque layers clear to opaque black; transparent layers clear to
        // fully transparent so the window background shows through.
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });
        // Batches arrive pre-sorted by the scene; each draw_* method returns
        // false only when the shared instance buffer is exhausted.
        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                panic!("scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                )
            }
        }

        command_encoder.end_encoding();

        // The buffer is StorageModeManaged, so notify Metal which byte range
        // the CPU wrote before the GPU reads it.
        instance_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

        // Return the instance buffer to the pool once the GPU has finished
        // with it. The Cell lets the FnMut completion handler move the buffer
        // out on its first (and only effective) invocation.
        let instance_buffers = self.instance_buffers.clone();
        let instance_buffer = Cell::new(Some(instance_buffer));
        let block = ConcreteBlock::new(move |_| {
            if let Some(instance_buffer) = instance_buffer.take() {
                instance_buffers.lock().push(instance_buffer);
            }
        });
        let block = block.copy();
        command_buffer.add_completed_handler(&block);
        command_buffer.commit();
        // Path tiles are only valid for this frame; free them for the next.
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        // The layer presents with transaction (see `new`), so wait until the
        // command buffer is scheduled before presenting the drawable.
        command_buffer.wait_until_scheduled();
        drawable.present();
    }
351
    /// Rasterizes every path into atlas tiles, one render pass per atlas
    /// texture, writing path vertices through the shared instance buffer.
    ///
    /// Returns a map from path id to its allocated tile, or `None` if the
    /// vertex data would overflow `INSTANCE_BUFFER_SIZE`.
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        let mut tiles = HashMap::default();
        // Group vertices by the atlas texture their tile landed in, so each
        // texture needs only a single render pass below.
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    // Translate from window coordinates into the tile's
                    // position within the atlas texture.
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            // Metal requires 256-byte alignment for buffer binding offsets.
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > INSTANCE_BUFFER_SIZE {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            // Copy the vertex data into the shared instance buffer at the
            // region the encoder was just pointed at.
            let buffer_contents =
                unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }
438
    /// Encodes an instanced draw of `shadows` (one unit quad per shadow).
    ///
    /// Returns `false` if the shadow data does not fit in the remaining space
    /// of `instance_buffer`.
    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        // Metal requires 256-byte alignment for buffer binding offsets.
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        // Instance data is bound at the current offset for both stages.
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // Copy the shadow instances into the shared instance buffer.
        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        // 6 vertices per instance: the two triangles of the unit quad.
        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }
501
    /// Encodes an instanced draw of `quads` (one unit quad per instance).
    ///
    /// Returns `false` if the quad data does not fit in the remaining space
    /// of `instance_buffer`.
    fn draw_quads(
        &mut self,
        quads: &[Quad],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        // Metal requires 256-byte alignment for buffer binding offsets.
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        // Instance data is bound at the current offset for both stages.
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // Copy the quad instances into the shared instance buffer.
        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        // 6 vertices per instance: the two triangles of the unit quad.
        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }
560
    /// Draws previously rasterized path tiles to the screen as sprites,
    /// batching consecutive paths that live in the same atlas texture into a
    /// single instanced draw call.
    ///
    /// Returns `false` if a batch's sprite data does not fit in the remaining
    /// space of `instance_buffer`.
    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        // Accumulate sprites while successive tiles share a texture; flush a
        // draw call whenever the texture changes or the paths run out.
        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                // Nothing pending and the texture changed (or input ended).
                break;
            } else {
                // Metal requires 256-byte alignment for buffer binding offsets.
                align_offset(instance_offset);
                // `take` resets the batch key so the next iteration starts a
                // fresh run on whatever texture comes next.
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > INSTANCE_BUFFER_SIZE {
                    return false;
                }

                let buffer_contents =
                    unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

                // Copy the sprite instances into the shared instance buffer.
                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                // 6 vertices per instance: the two triangles of the unit quad.
                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }
669
    /// Encodes an instanced draw of `underlines` (one unit quad per underline).
    ///
    /// Returns `false` if the underline data does not fit in the remaining
    /// space of `instance_buffer`.
    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        // Metal requires 256-byte alignment for buffer binding offsets.
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        // Instance data is bound at the current offset for both stages.
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // Copy the underline instances into the shared instance buffer.
        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        // 6 vertices per instance: the two triangles of the unit quad.
        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }
732
    /// Encodes an instanced draw of monochrome sprites (e.g. glyphs) sampled
    /// from the atlas texture identified by `texture_id`.
    ///
    /// Returns `false` if the sprite data does not fit in the remaining space
    /// of `instance_buffer`.
    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        // Metal requires 256-byte alignment for buffer binding offsets.
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // Copy the sprite instances into the shared instance buffer.
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        // 6 vertices per instance: the two triangles of the unit quad.
        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }
806
    /// Encodes an instanced draw of polychrome (full-color) sprites sampled
    /// from the atlas texture identified by `texture_id`.
    ///
    /// Returns `false` if the sprite data does not fit in the remaining space
    /// of `instance_buffer`.
    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        // Metal requires 256-byte alignment for buffer binding offsets.
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // Copy the sprite instances into the shared instance buffer.
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        // 6 vertices per instance: the two triangles of the unit quad.
        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }
880
    /// Draws video surfaces, wrapping each CVPixelBuffer's planes as Metal
    /// textures via the CoreVideo texture cache. One (non-instanced) quad is
    /// drawn per surface.
    ///
    /// Returns `false` if a surface's bounds record does not fit in the
    /// remaining space of `instance_buffer`.
    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            // Only bi-planar 4:2:0 YCbCr (full range) pixel buffers are
            // supported; the per-plane texture formats below assume it.
            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            // Plane 0: luma (Y), one byte per pixel.
            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            // Plane 1: interleaved chroma (CbCr), two bytes per sample.
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

            // Metal requires 256-byte alignment for buffer binding offsets.
            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > INSTANCE_BUFFER_SIZE {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            // Write this surface's bounds/mask record into the instance
            // buffer at the offset the encoder was just pointed at.
            unsafe {
                let buffer_contents = (instance_buffer.contents() as *mut u8).add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
979}
980
981fn build_pipeline_state(
982 device: &metal::DeviceRef,
983 library: &metal::LibraryRef,
984 label: &str,
985 vertex_fn_name: &str,
986 fragment_fn_name: &str,
987 pixel_format: metal::MTLPixelFormat,
988) -> metal::RenderPipelineState {
989 let vertex_fn = library
990 .get_function(vertex_fn_name, None)
991 .expect("error locating vertex function");
992 let fragment_fn = library
993 .get_function(fragment_fn_name, None)
994 .expect("error locating fragment function");
995
996 let descriptor = metal::RenderPipelineDescriptor::new();
997 descriptor.set_label(label);
998 descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
999 descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1000 let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1001 color_attachment.set_pixel_format(pixel_format);
1002 color_attachment.set_blending_enabled(true);
1003 color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1004 color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1005 color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
1006 color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1007 color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1008 color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1009
1010 device
1011 .new_render_pipeline_state(&descriptor)
1012 .expect("could not create render pipeline state")
1013}
1014
1015fn build_path_rasterization_pipeline_state(
1016 device: &metal::DeviceRef,
1017 library: &metal::LibraryRef,
1018 label: &str,
1019 vertex_fn_name: &str,
1020 fragment_fn_name: &str,
1021 pixel_format: metal::MTLPixelFormat,
1022) -> metal::RenderPipelineState {
1023 let vertex_fn = library
1024 .get_function(vertex_fn_name, None)
1025 .expect("error locating vertex function");
1026 let fragment_fn = library
1027 .get_function(fragment_fn_name, None)
1028 .expect("error locating fragment function");
1029
1030 let descriptor = metal::RenderPipelineDescriptor::new();
1031 descriptor.set_label(label);
1032 descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1033 descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1034 let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1035 color_attachment.set_pixel_format(pixel_format);
1036 color_attachment.set_blending_enabled(true);
1037 color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1038 color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1039 color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
1040 color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1041 color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
1042 color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1043
1044 device
1045 .new_render_pipeline_state(&descriptor)
1046 .expect("could not create render pipeline state")
1047}
1048
/// Rounds `offset` up to the next multiple of 256 — Metal wants buffer
/// binding offsets aligned to 256 bytes.
fn align_offset(offset: &mut usize) {
    // Bit-mask form of `((offset + 255) / 256) * 256`.
    *offset = (*offset + 255) & !255;
}
1053
/// Buffer argument-table indices for the shadow vertex/fragment shaders.
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}
1060
/// Buffer argument-table indices for the quad vertex/fragment shaders.
#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}
1067
/// Buffer argument-table indices for the underline vertex/fragment shaders.
#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}
1074
/// Buffer/texture argument-table indices shared by the monochrome sprite,
/// polychrome sprite, and path sprite shaders.
#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}
1083
/// Buffer/texture argument-table indices for the surface (video) shaders;
/// luma and chroma planes are bound as separate textures.
#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}
1093
/// Buffer argument-table indices for the path rasterization shaders.
#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}
1099
/// Per-instance data for drawing a rasterized path tile to the screen.
/// `#[repr(C)]` because instances are copied byte-for-byte into the GPU
/// instance buffer and read by the path sprite shaders.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    // Screen-space bounds the tile is drawn into.
    pub bounds: Bounds<ScaledPixels>,
    // Fill color applied to the tile's coverage.
    pub color: Hsla,
    // Atlas tile holding the rasterized path coverage.
    pub tile: AtlasTile,
}
1107
/// Per-surface record written into the GPU instance buffer (via `ptr::write`
/// in `draw_surfaces`); `#[repr(C)]` so the layout matches the shader side.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    // Screen-space bounds the video frame is drawn into.
    pub bounds: Bounds<ScaledPixels>,
    // Clip mask applied when compositing the surface.
    pub content_mask: ContentMask<ScaledPixels>,
}