use super::metal_atlas::MetalAtlas;
use crate::{
    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
    Hsla, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch, Quad,
    ScaledPixels, Scene, Shadow, Size, Surface, Underline,
};
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::NSUInteger,
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use foreign_types::ForeignType;
use media::core_video::CVMetalTextureCache;
use metal::{CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to metal
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// This size is an arbitrary choice; there's probably a more optimal value
// (maybe we could even adjust it dynamically).
const INSTANCE_BUFFER_SIZE: usize = 2 * 1024 * 1024;

pub type Context = Arc<Mutex<Vec<metal::Buffer>>>;
pub type Renderer = MetalRenderer;

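// The window, view, bounds, and transparency parameters are unused on macOS;
// they appear to exist so this constructor stays signature-compatible with
// the renderer constructors on the other platforms.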
pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    _transparent: bool,
) -> Renderer {
    MetalRenderer::new(context)
}

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: CVMetalTextureCache,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<Vec<metal::Buffer>>>) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(false);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

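        // Packs a point's coordinates into a single u64: x occupies the low
        // 32 bits and y the high 32 bits, so the buffer bytes match a Metal
        // float2 { x, y } on little-endian hardware.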
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

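        // Two triangles covering the unit square. Every instanced draw below
        // reuses these six vertices, which the vertex shaders scale into each
        // primitive's bounds.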
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

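        // Paths are rasterized into a single-channel R16Float atlas texture
        // first; all other pipelines render directly into the BGRA8Unorm
        // drawable.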
        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() };

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<f64>) {
        unsafe {
            let _: () = msg_send![self.layer(), setDrawableSize: size];
        }
    }

    pub fn update_transparency(&mut self, _transparent: bool) {
        // todo(mac)?
    }

    pub fn destroy(&mut self) {
        // nothing to do
    }

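    /// Draws an entire frame: acquires the next drawable, rasterizes any
    /// paths into the sprite atlas, encodes every primitive batch into a
    /// single render pass, and recycles the instance buffer once the GPU has
    /// finished with it.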
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        let mut instance_buffer = self.instance_buffer_pool.lock().pop().unwrap_or_else(|| {
            self.device.new_buffer(
                INSTANCE_BUFFER_SIZE as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let Some(path_tiles) = self.rasterize_paths(
            scene.paths(),
            &mut instance_buffer,
            &mut instance_offset,
            command_buffer,
        ) else {
            log::error!("failed to rasterize {} paths", scene.paths().len());
            return;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });

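        // Encode the batches in scene order. Each draw_* call returns false
        // if the shared instance buffer would overflow, in which case the
        // rest of the scene is dropped for this frame.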
        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    &mut instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                log::error!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
                break;
            }
        }

        command_encoder.end_encoding();

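        // The instance buffer uses managed storage, so the CPU-written range
        // has to be flushed before the GPU consumes it.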
        instance_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

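        // Hand the instance buffer back to the pool only after the GPU has
        // finished executing the command buffer; the Cell ensures the block
        // returns it exactly once.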
        let instance_buffer_pool = self.instance_buffer_pool.clone();
        let instance_buffer = Cell::new(Some(instance_buffer));
        let block = ConcreteBlock::new(move |_| {
            if let Some(instance_buffer) = instance_buffer.take() {
                instance_buffer_pool.lock().push(instance_buffer);
            }
        });
        let block = block.copy();
        command_buffer.add_completed_handler(&block);

        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

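        // When presenting within a Core Animation transaction, the command
        // buffer must be scheduled before the drawable is presented manually;
        // otherwise presentation can simply be queued on the command buffer.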
        if self.presents_with_transaction {
            command_buffer.commit();
            command_buffer.wait_until_scheduled();
            drawable.present();
        } else {
            command_buffer.present_drawable(drawable);
            command_buffer.commit();
        }
    }

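    /// Rasterizes every path into a tile of the path atlas, batching
    /// vertices by destination texture so that each texture needs only one
    /// render pass. Returns the tile assigned to each `PathId`, or `None` if
    /// a tile could not be allocated or the instance buffer ran out of space.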
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path)?;
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

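        // One render pass per destination atlas texture; the path
        // rasterization pipeline accumulates coverage with additive blending
        // (see build_path_rasterization_pipeline_state below).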
        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > INSTANCE_BUFFER_SIZE {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let buffer_contents =
                unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }

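    // The draw_* methods below all follow the same pattern: align the
    // instance offset, bind the unit-quad vertices and the instance data,
    // copy the instances into the shared buffer, and issue one instanced
    // draw call, returning false if the buffer would overflow.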
    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &mut self,
        quads: &[Quad],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

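        // Group consecutive paths whose tiles landed in the same atlas
        // texture into one instanced draw; flush the accumulated sprites
        // whenever the texture changes or the paths run out.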
        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(instance_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > INSTANCE_BUFFER_SIZE {
                    return false;
                }

                let buffer_contents =
                    unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }

    fn draw_underlines(
        &mut self,
        underlines: &[Underline],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(instance_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

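    // Surfaces arrive as biplanar YCbCr CVPixelBuffers. Each plane is
    // wrapped in a Metal texture through the Core Video texture cache, and
    // the fragment shader presumably converts YCbCr to RGB when sampling.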
    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        instance_buffer: &mut metal::Buffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > INSTANCE_BUFFER_SIZE {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(instance_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            unsafe {
                let buffer_contents = (instance_buffer.contents() as *mut u8)
                    .add(*instance_offset) as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

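// All on-screen pipelines share this fixed-function setup: straight alpha
// blending for the color channels (SourceAlpha / OneMinusSourceAlpha) and
// additive blending for the alpha channel.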
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

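// Unlike build_pipeline_state, path rasterization blends purely additively
// (One / One for every factor), so overlapping triangles accumulate coverage
// in the R16Float target instead of compositing over one another.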
fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Align to multiples of 256 to make Metal happy.
fn align_offset(offset: &mut usize) {
    *offset = ((*offset + 255) / 256) * 256;
}
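
// Minimal illustrative sanity check of the rounding behavior above.
#[cfg(test)]
mod align_offset_tests {
    use super::align_offset;

    #[test]
    fn rounds_up_to_next_multiple_of_256() {
        for (input, expected) in [(0, 0), (1, 256), (255, 256), (256, 256), (257, 512)] {
            let mut offset = input;
            align_offset(&mut offset);
            assert_eq!(offset, expected);
        }
    }
}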
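// The *InputIndex enums below mirror the argument-table indices declared in
// the Metal shader source. For example, a quad vertex function would bind
// its arguments roughly like this (hypothetical signature, for illustration
// only):
//
//     vertex QuadVertexOutput quad_vertex(
//         uint unit_vertex_id [[vertex_id]],
//         uint quad_id [[instance_id]],
//         constant float2 *unit_vertices [[buffer(QuadInputIndex_Vertices)]],
//         constant Quad *quads [[buffer(QuadInputIndex_Quads)]],
//         constant Size_DevicePixels *viewport_size [[buffer(QuadInputIndex_ViewportSize)]]);
//
// Changing a discriminant here without updating the shaders would silently
// bind the wrong argument.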
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Hsla,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}