// metal_renderer.rs

   1use crate::{
   2    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
   3    Hsla, MetalAtlas, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch,
   4    Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline,
   5};
   6use cocoa::{
   7    base::{NO, YES},
   8    foundation::NSUInteger,
   9    quartzcore::AutoresizingMask,
  10};
  11use collections::HashMap;
  12use core_foundation::base::TCFType;
  13use foreign_types::ForeignType;
  14use media::core_video::CVMetalTextureCache;
  15use metal::{CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
  16use objc::{self, msg_send, sel, sel_impl};
  17use smallvec::SmallVec;
  18use std::{ffi::c_void, mem, ptr, sync::Arc};
  19
/// Precompiled Metal shader library, embedded at build time from the build
/// script's output directory.
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
/// Capacity of the per-frame instance buffer that all primitive batches and
/// path vertices are copied into. This is an arbitrary decision. There's
/// probably a more optimal value (maybe even we could adjust dynamically...)
const INSTANCE_BUFFER_SIZE: usize = 32 * 1024 * 1024;
  22
/// macOS renderer that encodes a `Scene` into Metal draw calls targeting a
/// `CAMetalLayer`.
pub(crate) struct MetalRenderer {
    // Layer the window displays; drawables are acquired from it each frame.
    layer: metal::MetalLayer,
    command_queue: CommandQueue,
    // One pipeline state per primitive kind, all built once in `new`.
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    // Two triangles covering the unit square, shared by all instanced draws.
    unit_vertices: metal::Buffer,
    // Managed-storage scratch buffer; instance data for every batch in a frame
    // is appended into it at aligned offsets (see `INSTANCE_BUFFER_SIZE`).
    instances: metal::Buffer,
    sprite_atlas: Arc<MetalAtlas>,
    // Cache for wrapping Core Video image buffers as Metal textures when
    // drawing `Surface` primitives.
    core_video_texture_cache: CVMetalTextureCache,
}
  39
  40impl MetalRenderer {
    /// Builds a renderer: acquires the system default Metal device, configures
    /// the layer, compiles all pipeline states from the embedded shader
    /// library, and allocates the shared vertex/instance buffers.
    ///
    /// `is_opaque` controls whether the backing layer is opaque (it also
    /// drives the clear-color alpha in `draw`). Exits the process if no
    /// compatible Metal device is available.
    pub fn new(is_opaque: bool) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            // No GPU we can use; there is no meaningful way to continue.
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_presents_with_transaction(true);
        layer.set_opaque(is_opaque);
        // Properties not exposed by the metal-rs wrapper are set through raw
        // Objective-C messages.
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }

        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        // Packs a point into the bit layout of a shader `float2`:
        // x in the low 32 bits, y in the high 32 bits.
        fn to_float2_bits(point: crate::PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        // Unit square as two triangles; every instanced primitive draw maps
        // these six vertices to its own bounds.
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<u64>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        // Path rasterization renders coverage into a single-channel float
        // atlas texture; every other pipeline targets the BGRA drawable.
        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));

        Self {
            layer,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instances,
            sprite_atlas,
            // SAFETY: `device` is a valid MTLDevice for the lifetime of this
            // call; the cache retains what it needs.
            core_video_texture_cache: unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() },
        }
    }
 178
    /// Returns the Metal layer this renderer draws into, for attaching to a
    /// window's view hierarchy.
    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }
 182
    /// Returns the shared sprite atlas used for glyphs, images, and
    /// rasterized paths.
    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }
 186
    /// Renders one frame: rasterizes the scene's paths into atlas tiles, then
    /// encodes every primitive batch into a single render pass against the
    /// layer's next drawable.
    ///
    /// Panics if the scene's instance data exceeds `INSTANCE_BUFFER_SIZE`
    /// (either during path rasterization or while encoding a batch). Skips
    /// the frame (with an error log) if no drawable is available.
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        // Round the floating-point drawable size up to whole device pixels.
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        // Running write cursor into `self.instances`, shared by path
        // rasterization and all batch encoders below.
        let mut instance_offset = 0;

        // Paths are rendered to atlas textures first so the main pass can
        // sample them as sprites.
        let Some(path_tiles) =
            self.rasterize_paths(scene.paths(), &mut instance_offset, command_buffer)
        else {
            panic!("failed to rasterize {} paths", scene.paths().len());
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        // Opaque windows clear to solid black; transparent ones to fully
        // transparent black.
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });
        // Batches arrive back-to-front; each draw_* returns false only when
        // the instance buffer is exhausted.
        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => {
                    self.draw_quads(quads, &mut instance_offset, viewport_size, command_encoder)
                }
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                // NOTE(review): this message accesses `scene.paths` etc. as
                // fields while the code above calls `scene.paths()` as a
                // method — confirm both accessors exist on `Scene`.
                panic!("scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                )
            }
        }

        command_encoder.end_encoding();

        // The instance buffer uses managed storage: tell Metal which byte
        // range the CPU wrote so it is synchronized to the GPU.
        self.instances.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

        command_buffer.commit();
        // Path tiles are per-frame; release their atlas textures for reuse.
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        // Present only after the GPU finishes, matching
        // `presents_with_transaction(true)` set in `new`.
        command_buffer.wait_until_completed();
        drawable.present();
    }
 312
    /// Rasterizes each path into a freshly-allocated atlas tile, returning a
    /// map from path id to its tile.
    ///
    /// Vertices are grouped by destination atlas texture and rendered in one
    /// offscreen pass per texture (R16Float coverage target). Vertex data is
    /// staged in `self.instances` starting at `*offset`; `offset` is advanced
    /// past the written bytes. Returns `None` if the instance buffer would
    /// overflow.
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            // Only the visible (mask-clipped) region needs atlas space.
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    // Translate from window space into the tile's position
                    // within its atlas texture.
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(offset);
            let next_offset = *offset + vertices.len() * mem::size_of::<PathVertex<ScaledPixels>>();
            // Bail out before writing anything past the buffer's capacity.
            if next_offset > INSTANCE_BUFFER_SIZE {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&self.instances),
                *offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let vertices_bytes_len = mem::size_of::<PathVertex<ScaledPixels>>() * vertices.len();
            // SAFETY: `next_offset <= INSTANCE_BUFFER_SIZE` was checked above,
            // so the destination range lies within the buffer's contents.
            let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *offset = next_offset;
        }

        Some(tiles)
    }
 397
    /// Encodes one instanced draw for a batch of shadows, staging the
    /// instance data in `self.instances` at `*offset` (advanced on success).
    /// Returns `false` if the instance buffer would overflow.
    fn draw_shadows(
        &mut self,
        shadows: &[Shadow],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        // Both stages read the shadow instances at the staged offset.
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = std::mem::size_of_val(shadows);
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };

        // Capacity check happens before the unsafe copy below.
        let next_offset = *offset + shadow_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // SAFETY: the range [*offset, next_offset) is within the buffer per
        // the check above, and the source slice spans `shadow_bytes_len`.
        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        // Six unit-square vertices per instance.
        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *offset = next_offset;
        true
    }
 458
    /// Encodes one instanced draw for a batch of quads, staging the instance
    /// data in `self.instances` at `*offset` (advanced on success). Returns
    /// `false` if the instance buffer would overflow.
    fn draw_quads(
        &mut self,
        quads: &[Quad],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        // Both stages read the quad instances at the staged offset.
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&self.instances),
            *offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = std::mem::size_of_val(quads);
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };

        // Capacity check happens before the unsafe copy below.
        let next_offset = *offset + quad_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // SAFETY: the range [*offset, next_offset) is within the buffer per
        // the check above, and the source slice spans `quad_bytes_len`.
        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        // Six unit-square vertices per instance.
        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *offset = next_offset;
        true
    }
 515
 516    fn draw_paths(
 517        &mut self,
 518        paths: &[Path<ScaledPixels>],
 519        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
 520        offset: &mut usize,
 521        viewport_size: Size<DevicePixels>,
 522        command_encoder: &metal::RenderCommandEncoderRef,
 523    ) -> bool {
 524        if paths.is_empty() {
 525            return true;
 526        }
 527
 528        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
 529        command_encoder.set_vertex_buffer(
 530            SpriteInputIndex::Vertices as u64,
 531            Some(&self.unit_vertices),
 532            0,
 533        );
 534        command_encoder.set_vertex_bytes(
 535            SpriteInputIndex::ViewportSize as u64,
 536            mem::size_of_val(&viewport_size) as u64,
 537            &viewport_size as *const Size<DevicePixels> as *const _,
 538        );
 539
 540        let mut prev_texture_id = None;
 541        let mut sprites = SmallVec::<[_; 1]>::new();
 542        let mut paths_and_tiles = paths
 543            .iter()
 544            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
 545            .peekable();
 546
 547        loop {
 548            if let Some((path, tile)) = paths_and_tiles.peek() {
 549                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
 550                    prev_texture_id = Some(tile.texture_id);
 551                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
 552                    sprites.push(PathSprite {
 553                        bounds: Bounds {
 554                            origin: origin.map(|p| p.floor()),
 555                            size: tile.bounds.size.map(Into::into),
 556                        },
 557                        color: path.color,
 558                        tile: (*tile).clone(),
 559                    });
 560                    paths_and_tiles.next();
 561                    continue;
 562                }
 563            }
 564
 565            if sprites.is_empty() {
 566                break;
 567            } else {
 568                align_offset(offset);
 569                let texture_id = prev_texture_id.take().unwrap();
 570                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
 571                let texture_size = size(
 572                    DevicePixels(texture.width() as i32),
 573                    DevicePixels(texture.height() as i32),
 574                );
 575
 576                command_encoder.set_vertex_buffer(
 577                    SpriteInputIndex::Sprites as u64,
 578                    Some(&self.instances),
 579                    *offset as u64,
 580                );
 581                command_encoder.set_vertex_bytes(
 582                    SpriteInputIndex::AtlasTextureSize as u64,
 583                    mem::size_of_val(&texture_size) as u64,
 584                    &texture_size as *const Size<DevicePixels> as *const _,
 585                );
 586                command_encoder.set_fragment_buffer(
 587                    SpriteInputIndex::Sprites as u64,
 588                    Some(&self.instances),
 589                    *offset as u64,
 590                );
 591                command_encoder
 592                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
 593
 594                let sprite_bytes_len = mem::size_of::<MonochromeSprite>() * sprites.len();
 595                let next_offset = *offset + sprite_bytes_len;
 596                if next_offset > INSTANCE_BUFFER_SIZE {
 597                    return false;
 598                }
 599
 600                let buffer_contents =
 601                    unsafe { (self.instances.contents() as *mut u8).add(*offset) };
 602
 603                unsafe {
 604                    ptr::copy_nonoverlapping(
 605                        sprites.as_ptr() as *const u8,
 606                        buffer_contents,
 607                        sprite_bytes_len,
 608                    );
 609                }
 610
 611                command_encoder.draw_primitives_instanced(
 612                    metal::MTLPrimitiveType::Triangle,
 613                    0,
 614                    6,
 615                    sprites.len() as u64,
 616                );
 617                *offset = next_offset;
 618                sprites.clear();
 619            }
 620        }
 621        true
 622    }
 623
 624    fn draw_underlines(
 625        &mut self,
 626        underlines: &[Underline],
 627        offset: &mut usize,
 628        viewport_size: Size<DevicePixels>,
 629        command_encoder: &metal::RenderCommandEncoderRef,
 630    ) -> bool {
 631        if underlines.is_empty() {
 632            return true;
 633        }
 634        align_offset(offset);
 635
 636        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
 637        command_encoder.set_vertex_buffer(
 638            UnderlineInputIndex::Vertices as u64,
 639            Some(&self.unit_vertices),
 640            0,
 641        );
 642        command_encoder.set_vertex_buffer(
 643            UnderlineInputIndex::Underlines as u64,
 644            Some(&self.instances),
 645            *offset as u64,
 646        );
 647        command_encoder.set_fragment_buffer(
 648            UnderlineInputIndex::Underlines as u64,
 649            Some(&self.instances),
 650            *offset as u64,
 651        );
 652
 653        command_encoder.set_vertex_bytes(
 654            UnderlineInputIndex::ViewportSize as u64,
 655            mem::size_of_val(&viewport_size) as u64,
 656            &viewport_size as *const Size<DevicePixels> as *const _,
 657        );
 658
 659        let quad_bytes_len = std::mem::size_of_val(underlines);
 660        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
 661        unsafe {
 662            ptr::copy_nonoverlapping(
 663                underlines.as_ptr() as *const u8,
 664                buffer_contents,
 665                quad_bytes_len,
 666            );
 667        }
 668
 669        let next_offset = *offset + quad_bytes_len;
 670        if next_offset > INSTANCE_BUFFER_SIZE {
 671            return false;
 672        }
 673
 674        command_encoder.draw_primitives_instanced(
 675            metal::MTLPrimitiveType::Triangle,
 676            0,
 677            6,
 678            underlines.len() as u64,
 679        );
 680        *offset = next_offset;
 681        true
 682    }
 683
    /// Encodes one instanced draw for monochrome (single-color, alpha-masked)
    /// sprites sampled from the atlas texture `texture_id`. Instance data is
    /// staged in `self.instances` at `*offset` (advanced on success). Returns
    /// `false` if the instance buffer would overflow.
    fn draw_monochrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = std::mem::size_of_val(sprites);
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };

        // Capacity check happens before the unsafe copy below.
        let next_offset = *offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // SAFETY: the range [*offset, next_offset) is within the buffer per
        // the check above, and the source slice spans `sprite_bytes_len`.
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        // Six unit-square vertices per instance.
        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *offset = next_offset;
        true
    }
 755
    /// Encodes one instanced draw for polychrome (full-color) sprites sampled
    /// from the atlas texture `texture_id`. Instance data is staged in
    /// `self.instances` at `*offset` (advanced on success). Returns `false`
    /// if the instance buffer would overflow.
    fn draw_polychrome_sprites(
        &mut self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&self.instances),
            *offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = std::mem::size_of_val(sprites);
        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };

        // Capacity check happens before the unsafe copy below.
        let next_offset = *offset + sprite_bytes_len;
        if next_offset > INSTANCE_BUFFER_SIZE {
            return false;
        }

        // SAFETY: the range [*offset, next_offset) is within the buffer per
        // the check above, and the source slice spans `sprite_bytes_len`.
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        // Six unit-square vertices per instance.
        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *offset = next_offset;
        true
    }
 827
 828    fn draw_surfaces(
 829        &mut self,
 830        surfaces: &[Surface],
 831        offset: &mut usize,
 832        viewport_size: Size<DevicePixels>,
 833        command_encoder: &metal::RenderCommandEncoderRef,
 834    ) -> bool {
 835        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
 836        command_encoder.set_vertex_buffer(
 837            SurfaceInputIndex::Vertices as u64,
 838            Some(&self.unit_vertices),
 839            0,
 840        );
 841        command_encoder.set_vertex_bytes(
 842            SurfaceInputIndex::ViewportSize as u64,
 843            mem::size_of_val(&viewport_size) as u64,
 844            &viewport_size as *const Size<DevicePixels> as *const _,
 845        );
 846
 847        for surface in surfaces {
 848            let texture_size = size(
 849                DevicePixels::from(surface.image_buffer.width() as i32),
 850                DevicePixels::from(surface.image_buffer.height() as i32),
 851            );
 852
 853            assert_eq!(
 854                surface.image_buffer.pixel_format_type(),
 855                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
 856            );
 857
 858            let y_texture = unsafe {
 859                self.core_video_texture_cache
 860                    .create_texture_from_image(
 861                        surface.image_buffer.as_concrete_TypeRef(),
 862                        ptr::null(),
 863                        MTLPixelFormat::R8Unorm,
 864                        surface.image_buffer.plane_width(0),
 865                        surface.image_buffer.plane_height(0),
 866                        0,
 867                    )
 868                    .unwrap()
 869            };
 870            let cb_cr_texture = unsafe {
 871                self.core_video_texture_cache
 872                    .create_texture_from_image(
 873                        surface.image_buffer.as_concrete_TypeRef(),
 874                        ptr::null(),
 875                        MTLPixelFormat::RG8Unorm,
 876                        surface.image_buffer.plane_width(1),
 877                        surface.image_buffer.plane_height(1),
 878                        1,
 879                    )
 880                    .unwrap()
 881            };
 882
 883            align_offset(offset);
 884            let next_offset = *offset + mem::size_of::<Surface>();
 885            if next_offset > INSTANCE_BUFFER_SIZE {
 886                return false;
 887            }
 888
 889            command_encoder.set_vertex_buffer(
 890                SurfaceInputIndex::Surfaces as u64,
 891                Some(&self.instances),
 892                *offset as u64,
 893            );
 894            command_encoder.set_vertex_bytes(
 895                SurfaceInputIndex::TextureSize as u64,
 896                mem::size_of_val(&texture_size) as u64,
 897                &texture_size as *const Size<DevicePixels> as *const _,
 898            );
 899            command_encoder.set_fragment_texture(
 900                SurfaceInputIndex::YTexture as u64,
 901                Some(y_texture.as_texture_ref()),
 902            );
 903            command_encoder.set_fragment_texture(
 904                SurfaceInputIndex::CbCrTexture as u64,
 905                Some(cb_cr_texture.as_texture_ref()),
 906            );
 907
 908            unsafe {
 909                let buffer_contents =
 910                    (self.instances.contents() as *mut u8).add(*offset) as *mut SurfaceBounds;
 911                ptr::write(
 912                    buffer_contents,
 913                    SurfaceBounds {
 914                        bounds: surface.bounds,
 915                        content_mask: surface.content_mask.clone(),
 916                    },
 917                );
 918            }
 919
 920            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
 921            *offset = next_offset;
 922        }
 923        true
 924    }
 925}
 926
 927fn build_pipeline_state(
 928    device: &metal::DeviceRef,
 929    library: &metal::LibraryRef,
 930    label: &str,
 931    vertex_fn_name: &str,
 932    fragment_fn_name: &str,
 933    pixel_format: metal::MTLPixelFormat,
 934) -> metal::RenderPipelineState {
 935    let vertex_fn = library
 936        .get_function(vertex_fn_name, None)
 937        .expect("error locating vertex function");
 938    let fragment_fn = library
 939        .get_function(fragment_fn_name, None)
 940        .expect("error locating fragment function");
 941
 942    let descriptor = metal::RenderPipelineDescriptor::new();
 943    descriptor.set_label(label);
 944    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
 945    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
 946    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
 947    color_attachment.set_pixel_format(pixel_format);
 948    color_attachment.set_blending_enabled(true);
 949    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
 950    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
 951    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
 952    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
 953    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
 954    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
 955
 956    device
 957        .new_render_pipeline_state(&descriptor)
 958        .expect("could not create render pipeline state")
 959}
 960
 961fn build_path_rasterization_pipeline_state(
 962    device: &metal::DeviceRef,
 963    library: &metal::LibraryRef,
 964    label: &str,
 965    vertex_fn_name: &str,
 966    fragment_fn_name: &str,
 967    pixel_format: metal::MTLPixelFormat,
 968) -> metal::RenderPipelineState {
 969    let vertex_fn = library
 970        .get_function(vertex_fn_name, None)
 971        .expect("error locating vertex function");
 972    let fragment_fn = library
 973        .get_function(fragment_fn_name, None)
 974        .expect("error locating fragment function");
 975
 976    let descriptor = metal::RenderPipelineDescriptor::new();
 977    descriptor.set_label(label);
 978    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
 979    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
 980    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
 981    color_attachment.set_pixel_format(pixel_format);
 982    color_attachment.set_blending_enabled(true);
 983    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
 984    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
 985    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
 986    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
 987    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
 988    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
 989
 990    device
 991        .new_render_pipeline_state(&descriptor)
 992        .expect("could not create render pipeline state")
 993}
 994
 995// Align to multiples of 256 make Metal happy.
 996fn align_offset(offset: &mut usize) {
 997    *offset = ((*offset + 255) / 256) * 256;
 998}
 999
/// Argument indices for binding shadow-pass inputs on the command encoder.
/// Presumably mirrors the buffer indices declared in shaders.metal (as the
/// sprite/surface indices below demonstrably do) — keep in sync.
#[repr(C)]
enum ShadowInputIndex {
    /// Shared unit-quad vertex buffer.
    Vertices = 0,
    /// Per-instance shadow data.
    Shadows = 1,
    /// Viewport size in device pixels.
    ViewportSize = 2,
}
1006
/// Argument indices for binding quad-pass inputs on the command encoder.
/// Presumably mirrors the buffer indices declared in shaders.metal — keep
/// in sync.
#[repr(C)]
enum QuadInputIndex {
    /// Shared unit-quad vertex buffer.
    Vertices = 0,
    /// Per-instance quad data.
    Quads = 1,
    /// Viewport size in device pixels.
    ViewportSize = 2,
}
1013
/// Argument indices for binding underline-pass inputs on the command
/// encoder. Presumably mirrors the buffer indices declared in shaders.metal
/// — keep in sync.
#[repr(C)]
enum UnderlineInputIndex {
    /// Shared unit-quad vertex buffer.
    Vertices = 0,
    /// Per-instance underline data.
    Underlines = 1,
    /// Viewport size in device pixels.
    ViewportSize = 2,
}
1020
/// Argument indices used when encoding the mono/polychrome sprite passes:
/// the first four are vertex/fragment buffer slots (`set_vertex_buffer` /
/// `set_vertex_bytes`), and `AtlasTexture` is a fragment texture slot.
/// Values must match the shader side.
#[repr(C)]
enum SpriteInputIndex {
    /// Shared unit-quad vertex buffer.
    Vertices = 0,
    /// Per-instance sprite data within the shared instance buffer.
    Sprites = 1,
    /// Viewport size in device pixels.
    ViewportSize = 2,
    /// Size of the bound atlas texture, in device pixels.
    AtlasTextureSize = 3,
    /// The sprite atlas texture itself (fragment texture slot).
    AtlasTexture = 4,
}
1029
/// Argument indices used when encoding the video-surface pass (see
/// `draw_surfaces`): buffer slots for geometry and per-surface data, plus
/// two fragment texture slots for the luma and chroma planes. Values must
/// match the shader side.
#[repr(C)]
enum SurfaceInputIndex {
    /// Shared unit-quad vertex buffer.
    Vertices = 0,
    /// Per-surface `SurfaceBounds` records within the instance buffer.
    Surfaces = 1,
    /// Viewport size in device pixels.
    ViewportSize = 2,
    /// Size of the surface's image buffer, in device pixels.
    TextureSize = 3,
    /// Luma plane (R8Unorm) fragment texture slot.
    YTexture = 4,
    /// Interleaved chroma plane (RG8Unorm) fragment texture slot.
    CbCrTexture = 5,
}
1039
/// Argument indices for the path-rasterization pass. Presumably mirrors the
/// buffer indices declared in shaders.metal — keep in sync.
#[repr(C)]
enum PathRasterizationInputIndex {
    /// Path vertex buffer.
    Vertices = 0,
    /// Size of the destination atlas texture, in device pixels.
    AtlasTextureSize = 1,
}
1045
/// Per-instance data for drawing a previously rasterized path out of the
/// atlas. `#[repr(C)]` for a stable layout; presumably copied verbatim into
/// the GPU instance buffer like the other primitives, so the field layout
/// must match the shader-side struct — confirm against shaders.metal.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    /// Destination bounds in scaled screen pixels.
    pub bounds: Bounds<ScaledPixels>,
    /// Fill color applied to the path coverage.
    pub color: Hsla,
    /// Atlas tile containing the rasterized path.
    pub tile: AtlasTile,
}
1053
/// Per-instance record written into the GPU instance buffer for each video
/// surface (see `draw_surfaces`). `#[repr(C)]` for a stable layout; the
/// field order must match the shader-side struct.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    /// Destination bounds in scaled screen pixels.
    pub bounds: Bounds<ScaledPixels>,
    /// Clip mask applied when compositing the surface.
    pub content_mask: ContentMask<ScaledPixels>,
}