use super::metal_atlas::MetalAtlas;
use crate::{
    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
    Hsla, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch, Quad,
    ScaledPixels, Scene, Shadow, Size, Surface, Underline,
};
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::NSUInteger,
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use foreign_types::ForeignType;
use media::core_video::CVMetalTextureCache;
use metal::{CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to the Metal shader code.
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
const INSTANCE_BUFFER_SIZE: usize = 2 * 1024 * 1024; // This size is arbitrary; a more optimal value probably exists, and we could perhaps even adjust it dynamically.

pub type Context = Arc<Mutex<Vec<metal::Buffer>>>;
pub type Renderer = MetalRenderer;

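/// Creates the macOS renderer. The native window, view, and bounds parameters
/// are unused here: the renderer owns its `CAMetalLayer`, which the windowing
/// code can retrieve via `layer()`/`layer_ptr()`, so only the shared
/// instance-buffer pool is required.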
pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
) -> Renderer {
    MetalRenderer::new(context)
}

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(false);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

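        // Six vertices forming two triangles that cover the unit square; every
        // instanced draw call below expands its primitives from these.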
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() };

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

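    /// Sets the layer's drawable size in device pixels. The `Size<f64>` is
    /// passed straight to `setDrawableSize:`, which assumes its memory layout
    /// matches `CGSize`. A hypothetical call site:
    ///
    /// ```ignore
    /// renderer.update_drawable_size(Size {
    ///     width: 1440.0,
    ///     height: 900.0,
    /// });
    /// ```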
    pub fn update_drawable_size(&mut self, size: Size<f64>) {
        unsafe {
            let _: () = msg_send![
                self.layer(),
                setDrawableSize: size
            ];
        }
    }

    pub fn destroy(&mut self) {
        // nothing to do
    }

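    /// Renders one frame of `scene`: first rasterizes any paths into atlas
    /// tiles, then encodes a single render pass over the next drawable,
    /// drawing each primitive batch in order. Instance data for the whole
    /// frame is packed into one pooled buffer, which a completion handler
    /// returns to the pool after the GPU finishes.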
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        let mut instance_buffer = self.instance_buffer_pool.lock().pop().unwrap_or_else(|| {
            self.device.new_buffer(
                INSTANCE_BUFFER_SIZE as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let Some(path_tiles) = self.rasterize_paths(
            scene.paths(),
            &mut instance_buffer,
            &mut instance_offset,
            command_buffer,
        ) else {
            log::error!("failed to rasterize {} paths", scene.paths().len());
            return;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                log::error!("scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
                break;
            }
        }

        command_encoder.end_encoding();

        instance_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

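        // Recycle the instance buffer once the GPU is done with it: the
        // completed handler fires after this command buffer executes, and the
        // `Cell` lets the block move the buffer back into the pool exactly once.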
        let instance_buffer_pool = self.instance_buffer_pool.clone();
        let instance_buffer = Cell::new(Some(instance_buffer));
        let block = ConcreteBlock::new(move |_| {
            if let Some(instance_buffer) = instance_buffer.take() {
                instance_buffer_pool.lock().push(instance_buffer);
            }
        });
        let block = block.copy();
        command_buffer.add_completed_handler(&block);

        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        if self.presents_with_transaction {
            command_buffer.commit();
            command_buffer.wait_until_scheduled();
            drawable.present();
        } else {
            command_buffer.present_drawable(drawable);
            command_buffer.commit();
        }
    }

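    /// Rasterizes `paths` into tiles of the R16Float path atlas, encoding one
    /// offscreen render pass per destination atlas texture. Returns a map from
    /// path id to its tile, or `None` if the path vertices would overflow the
    /// instance buffer.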
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > INSTANCE_BUFFER_SIZE {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let buffer_contents =
                unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }

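    /// Draws a batch of shadows. This and the other instanced `draw_*` methods
    /// follow the same pattern: copy the instance slice into the pooled buffer
    /// at an aligned offset, bind it for both the vertex and fragment stages,
    /// and draw the six unit-quad vertices once per instance, returning
    /// `false` if the batch would overflow the instance buffer.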
    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &mut self,
        quads: &[Quad],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

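        // Each path has already been rasterized into an atlas tile; draw the
        // tiles as sprites, batching consecutive paths that share an atlas
        // texture into one instanced draw and flushing whenever the texture
        // changes (or the paths run out).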
        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > INSTANCE_BUFFER_SIZE {
                    return false;
                }

                let buffer_contents =
                    unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }

    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

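            // Wrap the image buffer's two planes as Metal textures: plane 0 is
            // luminance (Y) sampled as R8Unorm, and plane 1 is interleaved
            // chrominance (CbCr) sampled as RG8Unorm at half resolution.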
            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

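            // Note: `size_of::<Surface>()` bytes are reserved even though only
            // a `SurfaceBounds` is written below, so this slightly
            // over-reserves space in the instance buffer.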
            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > INSTANCE_BUFFER_SIZE {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            unsafe {
                let buffer_contents = (instance_buffer.contents() as *mut u8).add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

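/// Builds a render pipeline state with the given shader functions and target
/// pixel format. Blending is configured as source-over for the color channels
/// (source alpha / one-minus-source-alpha) and additive (one / one) for alpha.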
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

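/// Like `build_pipeline_state`, but with fully additive blending (one / one
/// for both color and alpha) so that contributions from overlapping path
/// triangles accumulate in the R16Float path atlas instead of compositing
/// over one another.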
fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Buffer offsets bound via `set_vertex_buffer`/`set_fragment_buffer` must be
// 256-byte aligned to keep Metal happy, so round the offset up to the next
// multiple of 256.
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}

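// The discriminants below are the argument-table indices used when binding
// buffers and textures for each pipeline; they are expected to mirror the
// [[buffer(n)]]/[[texture(n)]] indices declared in the Metal shader source.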
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

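// Instance data shared with the GPU: `#[repr(C)]` keeps the field layout
// stable so it matches the corresponding struct definitions on the shader
// side.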
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}