metal_renderer.rs

   1use super::metal_atlas::MetalAtlas;
   2use crate::{
   3    AtlasTextureId, Background, Bounds, ContentMask, DevicePixels, MonochromeSprite, PaintSurface,
   4    Path, Point, PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size,
   5    Surface, Underline, point, size,
   6};
   7use anyhow::Result;
   8use block::ConcreteBlock;
   9use cocoa::{
  10    base::{NO, YES},
  11    foundation::{NSSize, NSUInteger},
  12    quartzcore::AutoresizingMask,
  13};
  14#[cfg(any(test, feature = "test-support"))]
  15use image::RgbaImage;
  16
  17use core_foundation::base::TCFType;
  18use core_video::{
  19    metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
  20    pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
  21};
  22use foreign_types::{ForeignType, ForeignTypeRef};
  23use metal::{
  24    CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange,
  25    RenderPassColorAttachmentDescriptorRef,
  26};
  27use objc::{self, msg_send, sel, sel_impl};
  28use parking_lot::Mutex;
  29
  30use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};
  31
// Exported to metal
pub(crate) type PointF = crate::Point<f32>;

// Shaders are either precompiled into a metallib at build time, or (with the
// `runtime_shaders` feature) compiled from source when the renderer starts.
#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA, all devices support it.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

/// Shared renderer context handed to `new_renderer`; pools instance buffers
/// across frames (and across renderers sharing the same context).
pub type Context = Arc<Mutex<InstanceBufferPool>>;
/// Platform renderer alias used by cross-platform code.
pub type Renderer = MetalRenderer;
  45
/// Constructs the platform [`Renderer`] for macOS.
///
/// The window, view, and bounds parameters are unused on the Metal path: the
/// renderer owns its own `CAMetalLayer`, which the caller attaches to a view.
///
/// # Safety
/// This function performs no unsafe operations itself; the signature is
/// `unsafe` to match the cross-platform constructor contract, where other
/// backends dereference the raw window/view pointers.
pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    transparent: bool,
) -> Renderer {
    MetalRenderer::new(context, transparent)
}
  55
/// A pool of reusable Metal instance buffers, all of the same size.
/// `buffer_size` grows (via `reset`) when a scene is too large to encode.
pub(crate) struct InstanceBufferPool {
    // Current size, in bytes, of every buffer handed out by this pool.
    buffer_size: usize,
    // Buffers returned by `release` that can be handed out again by `acquire`.
    buffers: Vec<metal::Buffer>,
}
  60
  61impl Default for InstanceBufferPool {
  62    fn default() -> Self {
  63        Self {
  64            buffer_size: 2 * 1024 * 1024,
  65            buffers: Vec::new(),
  66        }
  67    }
  68}
  69
/// A Metal buffer checked out of an [`InstanceBufferPool`], tagged with the
/// pool's buffer size at acquisition time so `release` can detect staleness.
pub(crate) struct InstanceBuffer {
    // The GPU buffer primitive instance data is written into.
    metal_buffer: metal::Buffer,
    // Size in bytes of `metal_buffer` when it was acquired.
    size: usize,
}
  74
  75impl InstanceBufferPool {
  76    pub(crate) fn reset(&mut self, buffer_size: usize) {
  77        self.buffer_size = buffer_size;
  78        self.buffers.clear();
  79    }
  80
  81    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
  82        let buffer = self.buffers.pop().unwrap_or_else(|| {
  83            device.new_buffer(
  84                self.buffer_size as u64,
  85                MTLResourceOptions::StorageModeManaged,
  86            )
  87        });
  88        InstanceBuffer {
  89            metal_buffer: buffer,
  90            size: self.buffer_size,
  91        }
  92    }
  93
  94    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
  95        if buffer.size == self.buffer_size {
  96            self.buffers.push(buffer.metal_buffer)
  97        }
  98    }
  99}
 100
/// The macOS renderer: owns the Metal device, the window's `CAMetalLayer`,
/// one pipeline state per primitive kind, and the offscreen textures used
/// for path rasterization.
pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    // When true, `draw` commits, waits until scheduled, then presents the
    // drawable manually (used for synchronized window resizing).
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    // Six packed float2 vertices forming two triangles over the unit square.
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    // Cache mapping CVPixelBuffers to Metal textures for surface rendering.
    core_video_texture_cache: core_video::metal_texture_cache::CVMetalTextureCache,
    // Drawable-sized texture paths are rasterized into before compositing.
    path_intermediate_texture: Option<metal::Texture>,
    // MSAA render target resolved into `path_intermediate_texture`; `None`
    // when `path_sample_count <= 1`.
    path_intermediate_msaa_texture: Option<metal::Texture>,
    path_sample_count: u32,
}
 123
/// Vertex layout for the path rasterization pass; `#[repr(C)]` so the byte
/// layout matches the corresponding struct in the Metal shader source.
#[repr(C)]
pub struct PathRasterizationVertex {
    // Vertex position in scaled pixels.
    pub xy_position: Point<ScaledPixels>,
    // Curve-space coordinate used by the fragment shader.
    pub st_position: Point<f32>,
    // Fill for the whole path; duplicated onto every vertex.
    pub color: Background,
    // Path bounds clipped to the path's content mask.
    pub bounds: Bounds<ScaledPixels>,
}
 131
 132impl MetalRenderer {
    /// Creates a renderer: picks a Metal device, configures the backing
    /// `CAMetalLayer`, loads (or compiles) the shader library, and builds one
    /// render pipeline state per primitive kind.
    ///
    /// Exits the process if no Metal device is available at all.
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>, transparent: bool) -> Self {
        // Prefer low‐power integrated GPUs on Intel Mac. On Apple
        // Silicon, there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let device = if let Some(d) = metal::Device::all()
            .into_iter()
            .min_by_key(|d| (d.is_removable(), !d.is_low_power()))
        {
            d
        } else {
            // For some reason `all()` can return an empty list, see https://github.com/zed-industries/zed/issues/37689
            // In that case, we fall back to the system default device.
            log::error!(
                "Unable to enumerate Metal devices; attempting to use system default device"
            );
            metal::Device::system_default().unwrap_or_else(|| {
                log::error!("unable to access a compatible graphics device");
                std::process::exit(1);
            })
        };

        // BGRA8Unorm here must match the color attachment format used when
        // building every pipeline state below.
        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        // Support direct-to-display rendering if the window is not transparent
        // https://developer.apple.com/documentation/metal/managing-your-game-window-for-metal-in-macos
        layer.set_opaque(!transparent);
        layer.set_maximum_drawable_count(3);
        // Allow texture reading for visual tests (captures screenshots without ScreenCaptureKit)
        #[cfg(any(test, feature = "test-support"))]
        layer.set_framebuffer_only(false);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        // Packs a point into a Metal `float2`: x occupies the low 32 bits,
        // y the high 32 bits.
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        // Two triangles covering the unit square, shared by all instanced
        // primitive draws.
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        // Path rasterization is the only multisampled pipeline; everything
        // else renders directly into the drawable.
        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::BGRA8Unorm,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_path_sprite_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            CVMetalTextureCache::new(None, device.clone(), None).unwrap();

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
            // Created lazily by `update_drawable_size`, since the drawable
            // size is not known yet.
            path_intermediate_texture: None,
            path_intermediate_msaa_texture: None,
            path_sample_count: PATH_SAMPLE_COUNT,
        }
    }
 296
    /// The `CAMetalLayer` this renderer draws into.
    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }
 300
    /// Raw pointer to the layer, for handing to Objective-C view code.
    /// The pointer is only valid while `self` is alive.
    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }
 304
    /// The texture atlas glyphs and images are rasterized into.
    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }
 308
 309    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
 310        self.presents_with_transaction = presents_with_transaction;
 311        self.layer
 312            .set_presents_with_transaction(presents_with_transaction);
 313    }
 314
 315    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
 316        let size = NSSize {
 317            width: size.width.0 as f64,
 318            height: size.height.0 as f64,
 319        };
 320        unsafe {
 321            let _: () = msg_send![
 322                self.layer(),
 323                setDrawableSize: size
 324            ];
 325        }
 326        let device_pixels_size = Size {
 327            width: DevicePixels(size.width as i32),
 328            height: DevicePixels(size.height as i32),
 329        };
 330        self.update_path_intermediate_textures(device_pixels_size);
 331    }
 332
 333    fn update_path_intermediate_textures(&mut self, size: Size<DevicePixels>) {
 334        // We are uncertain when this happens, but sometimes size can be 0 here. Most likely before
 335        // the layout pass on window creation. Zero-sized texture creation causes SIGABRT.
 336        // https://github.com/zed-industries/zed/issues/36229
 337        if size.width.0 <= 0 || size.height.0 <= 0 {
 338            self.path_intermediate_texture = None;
 339            self.path_intermediate_msaa_texture = None;
 340            return;
 341        }
 342
 343        let texture_descriptor = metal::TextureDescriptor::new();
 344        texture_descriptor.set_width(size.width.0 as u64);
 345        texture_descriptor.set_height(size.height.0 as u64);
 346        texture_descriptor.set_pixel_format(metal::MTLPixelFormat::BGRA8Unorm);
 347        texture_descriptor
 348            .set_usage(metal::MTLTextureUsage::RenderTarget | metal::MTLTextureUsage::ShaderRead);
 349        self.path_intermediate_texture = Some(self.device.new_texture(&texture_descriptor));
 350
 351        if self.path_sample_count > 1 {
 352            let mut msaa_descriptor = texture_descriptor;
 353            msaa_descriptor.set_texture_type(metal::MTLTextureType::D2Multisample);
 354            msaa_descriptor.set_storage_mode(metal::MTLStorageMode::Private);
 355            msaa_descriptor.set_sample_count(self.path_sample_count as _);
 356            self.path_intermediate_msaa_texture = Some(self.device.new_texture(&msaa_descriptor));
 357        } else {
 358            self.path_intermediate_msaa_texture = None;
 359        }
 360    }
 361
    /// Toggles whether the layer is opaque when window transparency changes.
    /// Mirrors the `set_opaque(!transparent)` call in `new`.
    pub fn update_transparency(&self, transparent: bool) {
        self.layer.set_opaque(!transparent);
    }
 365
    /// Tears down the renderer. On Metal all resources are released by Drop,
    /// so this is a no-op kept to satisfy the cross-platform renderer API.
    pub fn destroy(&self) {
        // nothing to do
    }
 369
    /// Renders `scene` and presents it to the layer's next drawable.
    ///
    /// If encoding fails because the instance buffer is too small, the pool's
    /// buffer size is doubled (up to 256 MiB) and the frame is retried.
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    // Return the instance buffer to the pool only once the GPU
                    // has finished with it, via a completed handler. The Cell
                    // guards against the handler running more than once.
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    // The block must be copied to the heap before being passed
                    // to the Objective-C runtime.
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        // Present manually after scheduling, so presentation
                        // participates in the current CATransaction.
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    // Give up rather than growing without bound.
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }
 435
    /// Renders the scene to a texture and returns the pixel data as an RGBA image.
    /// This does not present the frame to screen - useful for visual testing
    /// where we want to capture what would be rendered without displaying it.
    ///
    /// Requires `set_framebuffer_only(false)` on the layer (set under the same
    /// cfg in `new`) so the drawable's texture can be read back.
    #[cfg(any(test, feature = "test-support"))]
    pub fn render_to_image(&mut self, scene: &Scene) -> Result<RgbaImage> {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = layer
            .next_drawable()
            .ok_or_else(|| anyhow::anyhow!("Failed to get drawable for render_to_image"))?;

        // Same grow-and-retry loop as `draw`, but failures bail with an error
        // instead of logging, and the frame is never presented.
        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    // Release the instance buffer back to the pool when the
                    // GPU finishes, exactly as `draw` does.
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    // Commit and wait for completion without presenting
                    command_buffer.commit();
                    command_buffer.wait_until_completed();

                    // Read pixels from the texture
                    let texture = drawable.texture();
                    let width = texture.width() as u32;
                    let height = texture.height() as u32;
                    // 4 bytes per pixel for the BGRA8 drawable format.
                    let bytes_per_row = width as usize * 4;
                    let buffer_size = height as usize * bytes_per_row;

                    let mut pixels = vec![0u8; buffer_size];

                    let region = metal::MTLRegion {
                        origin: metal::MTLOrigin { x: 0, y: 0, z: 0 },
                        size: metal::MTLSize {
                            width: width as u64,
                            height: height as u64,
                            depth: 1,
                        },
                    };

                    texture.get_bytes(
                        pixels.as_mut_ptr() as *mut std::ffi::c_void,
                        bytes_per_row as u64,
                        region,
                        0,
                    );

                    // Convert BGRA to RGBA (swap B and R channels)
                    for chunk in pixels.chunks_exact_mut(4) {
                        chunk.swap(0, 2);
                    }

                    return RgbaImage::from_raw(width, height, pixels).ok_or_else(|| {
                        anyhow::anyhow!("Failed to create RgbaImage from pixel data")
                    });
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        anyhow::bail!("instance buffer size grew too large: {}", buffer_size);
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }
 526
    /// Encodes the whole scene into a new command buffer, batch by batch.
    ///
    /// Errors (via the `scene too large` bail) when any batch reports that the
    /// instance buffer is full; callers grow the buffer and retry the frame.
    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        // Transparent windows clear to transparent black; opaque ones to
        // opaque black.
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        // Byte offset of the next free region of `instance_buffer`.
        let mut instance_offset = 0;

        let mut command_encoder = new_command_encoder(
            command_buffer,
            drawable,
            viewport_size,
            |color_attachment| {
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
            },
        );

        for batch in scene.batches() {
            // Each draw helper returns false when the instance buffer is full.
            let ok = match batch {
                PrimitiveBatch::Shadows(range) => self.draw_shadows(
                    &scene.shadows[range],
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(range) => self.draw_quads(
                    &scene.quads[range],
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(range) => {
                    let paths = &scene.paths[range];
                    // Paths need their own render pass into the intermediate
                    // texture, so end the current drawable pass, rasterize,
                    // then reopen the drawable pass with Load (to keep what
                    // was already drawn) and composite the result.
                    command_encoder.end_encoding();

                    let did_draw = self.draw_paths_to_intermediate(
                        paths,
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_buffer,
                    );

                    command_encoder = new_command_encoder(
                        command_buffer,
                        drawable,
                        viewport_size,
                        |color_attachment| {
                            color_attachment.set_load_action(metal::MTLLoadAction::Load);
                        },
                    );

                    if did_draw {
                        self.draw_paths_from_intermediate(
                            paths,
                            instance_buffer,
                            &mut instance_offset,
                            viewport_size,
                            command_encoder,
                        )
                    } else {
                        false
                    }
                }
                PrimitiveBatch::Underlines(range) => self.draw_underlines(
                    &scene.underlines[range],
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                    .draw_monochrome_sprites(
                        texture_id,
                        &scene.monochrome_sprites[range],
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    ),
                PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                    .draw_polychrome_sprites(
                        texture_id,
                        &scene.polychrome_sprites[range],
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_encoder,
                    ),
                PrimitiveBatch::Surfaces(range) => self.draw_surfaces(
                    &scene.surfaces[range],
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                // NOTE(review): subpixel sprites are presumably never batched
                // on the Metal path — confirm against the scene builder.
                PrimitiveBatch::SubpixelSprites { .. } => unreachable!(),
            };
            if !ok {
                // End the pass before bailing so the command buffer is left in
                // a valid state.
                command_encoder.end_encoding();
                anyhow::bail!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
            }
        }

        command_encoder.end_encoding();

        // Managed-storage buffers require notifying Metal of the CPU-written
        // range before the GPU reads it.
        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }
 655
 656    fn draw_paths_to_intermediate(
 657        &self,
 658        paths: &[Path<ScaledPixels>],
 659        instance_buffer: &mut InstanceBuffer,
 660        instance_offset: &mut usize,
 661        viewport_size: Size<DevicePixels>,
 662        command_buffer: &metal::CommandBufferRef,
 663    ) -> bool {
 664        if paths.is_empty() {
 665            return true;
 666        }
 667        let Some(intermediate_texture) = &self.path_intermediate_texture else {
 668            return false;
 669        };
 670
 671        let render_pass_descriptor = metal::RenderPassDescriptor::new();
 672        let color_attachment = render_pass_descriptor
 673            .color_attachments()
 674            .object_at(0)
 675            .unwrap();
 676        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
 677        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 0.));
 678
 679        if let Some(msaa_texture) = &self.path_intermediate_msaa_texture {
 680            color_attachment.set_texture(Some(msaa_texture));
 681            color_attachment.set_resolve_texture(Some(intermediate_texture));
 682            color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
 683        } else {
 684            color_attachment.set_texture(Some(intermediate_texture));
 685            color_attachment.set_store_action(metal::MTLStoreAction::Store);
 686        }
 687
 688        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
 689        command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
 690
 691        align_offset(instance_offset);
 692        let mut vertices = Vec::new();
 693        for path in paths {
 694            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
 695                xy_position: v.xy_position,
 696                st_position: v.st_position,
 697                color: path.color,
 698                bounds: path.bounds.intersect(&path.content_mask.bounds),
 699            }));
 700        }
 701        let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
 702        let next_offset = *instance_offset + vertices_bytes_len;
 703        if next_offset > instance_buffer.size {
 704            command_encoder.end_encoding();
 705            return false;
 706        }
 707        command_encoder.set_vertex_buffer(
 708            PathRasterizationInputIndex::Vertices as u64,
 709            Some(&instance_buffer.metal_buffer),
 710            *instance_offset as u64,
 711        );
 712        command_encoder.set_vertex_bytes(
 713            PathRasterizationInputIndex::ViewportSize as u64,
 714            mem::size_of_val(&viewport_size) as u64,
 715            &viewport_size as *const Size<DevicePixels> as *const _,
 716        );
 717        command_encoder.set_fragment_buffer(
 718            PathRasterizationInputIndex::Vertices as u64,
 719            Some(&instance_buffer.metal_buffer),
 720            *instance_offset as u64,
 721        );
 722        let buffer_contents =
 723            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
 724        unsafe {
 725            ptr::copy_nonoverlapping(
 726                vertices.as_ptr() as *const u8,
 727                buffer_contents,
 728                vertices_bytes_len,
 729            );
 730        }
 731        command_encoder.draw_primitives(
 732            metal::MTLPrimitiveType::Triangle,
 733            0,
 734            vertices.len() as u64,
 735        );
 736        *instance_offset = next_offset;
 737
 738        command_encoder.end_encoding();
 739        true
 740    }
 741
 742    fn draw_shadows(
 743        &self,
 744        shadows: &[Shadow],
 745        instance_buffer: &mut InstanceBuffer,
 746        instance_offset: &mut usize,
 747        viewport_size: Size<DevicePixels>,
 748        command_encoder: &metal::RenderCommandEncoderRef,
 749    ) -> bool {
 750        if shadows.is_empty() {
 751            return true;
 752        }
 753        align_offset(instance_offset);
 754
 755        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
 756        command_encoder.set_vertex_buffer(
 757            ShadowInputIndex::Vertices as u64,
 758            Some(&self.unit_vertices),
 759            0,
 760        );
 761        command_encoder.set_vertex_buffer(
 762            ShadowInputIndex::Shadows as u64,
 763            Some(&instance_buffer.metal_buffer),
 764            *instance_offset as u64,
 765        );
 766        command_encoder.set_fragment_buffer(
 767            ShadowInputIndex::Shadows as u64,
 768            Some(&instance_buffer.metal_buffer),
 769            *instance_offset as u64,
 770        );
 771
 772        command_encoder.set_vertex_bytes(
 773            ShadowInputIndex::ViewportSize as u64,
 774            mem::size_of_val(&viewport_size) as u64,
 775            &viewport_size as *const Size<DevicePixels> as *const _,
 776        );
 777
 778        let shadow_bytes_len = mem::size_of_val(shadows);
 779        let buffer_contents =
 780            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
 781
 782        let next_offset = *instance_offset + shadow_bytes_len;
 783        if next_offset > instance_buffer.size {
 784            return false;
 785        }
 786
 787        unsafe {
 788            ptr::copy_nonoverlapping(
 789                shadows.as_ptr() as *const u8,
 790                buffer_contents,
 791                shadow_bytes_len,
 792            );
 793        }
 794
 795        command_encoder.draw_primitives_instanced(
 796            metal::MTLPrimitiveType::Triangle,
 797            0,
 798            6,
 799            shadows.len() as u64,
 800        );
 801        *instance_offset = next_offset;
 802        true
 803    }
 804
 805    fn draw_quads(
 806        &self,
 807        quads: &[Quad],
 808        instance_buffer: &mut InstanceBuffer,
 809        instance_offset: &mut usize,
 810        viewport_size: Size<DevicePixels>,
 811        command_encoder: &metal::RenderCommandEncoderRef,
 812    ) -> bool {
 813        if quads.is_empty() {
 814            return true;
 815        }
 816        align_offset(instance_offset);
 817
 818        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
 819        command_encoder.set_vertex_buffer(
 820            QuadInputIndex::Vertices as u64,
 821            Some(&self.unit_vertices),
 822            0,
 823        );
 824        command_encoder.set_vertex_buffer(
 825            QuadInputIndex::Quads as u64,
 826            Some(&instance_buffer.metal_buffer),
 827            *instance_offset as u64,
 828        );
 829        command_encoder.set_fragment_buffer(
 830            QuadInputIndex::Quads as u64,
 831            Some(&instance_buffer.metal_buffer),
 832            *instance_offset as u64,
 833        );
 834
 835        command_encoder.set_vertex_bytes(
 836            QuadInputIndex::ViewportSize as u64,
 837            mem::size_of_val(&viewport_size) as u64,
 838            &viewport_size as *const Size<DevicePixels> as *const _,
 839        );
 840
 841        let quad_bytes_len = mem::size_of_val(quads);
 842        let buffer_contents =
 843            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
 844
 845        let next_offset = *instance_offset + quad_bytes_len;
 846        if next_offset > instance_buffer.size {
 847            return false;
 848        }
 849
 850        unsafe {
 851            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
 852        }
 853
 854        command_encoder.draw_primitives_instanced(
 855            metal::MTLPrimitiveType::Triangle,
 856            0,
 857            6,
 858            quads.len() as u64,
 859        );
 860        *instance_offset = next_offset;
 861        true
 862    }
 863
 864    fn draw_paths_from_intermediate(
 865        &self,
 866        paths: &[Path<ScaledPixels>],
 867        instance_buffer: &mut InstanceBuffer,
 868        instance_offset: &mut usize,
 869        viewport_size: Size<DevicePixels>,
 870        command_encoder: &metal::RenderCommandEncoderRef,
 871    ) -> bool {
 872        let Some(first_path) = paths.first() else {
 873            return true;
 874        };
 875
 876        let Some(ref intermediate_texture) = self.path_intermediate_texture else {
 877            return false;
 878        };
 879
 880        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
 881        command_encoder.set_vertex_buffer(
 882            SpriteInputIndex::Vertices as u64,
 883            Some(&self.unit_vertices),
 884            0,
 885        );
 886        command_encoder.set_vertex_bytes(
 887            SpriteInputIndex::ViewportSize as u64,
 888            mem::size_of_val(&viewport_size) as u64,
 889            &viewport_size as *const Size<DevicePixels> as *const _,
 890        );
 891
 892        command_encoder.set_fragment_texture(
 893            SpriteInputIndex::AtlasTexture as u64,
 894            Some(intermediate_texture),
 895        );
 896
 897        // When copying paths from the intermediate texture to the drawable,
 898        // each pixel must only be copied once, in case of transparent paths.
 899        //
 900        // If all paths have the same draw order, then their bounds are all
 901        // disjoint, so we can copy each path's bounds individually. If this
 902        // batch combines different draw orders, we perform a single copy
 903        // for a minimal spanning rect.
 904        let sprites;
 905        if paths.last().unwrap().order == first_path.order {
 906            sprites = paths
 907                .iter()
 908                .map(|path| PathSprite {
 909                    bounds: path.clipped_bounds(),
 910                })
 911                .collect();
 912        } else {
 913            let mut bounds = first_path.clipped_bounds();
 914            for path in paths.iter().skip(1) {
 915                bounds = bounds.union(&path.clipped_bounds());
 916            }
 917            sprites = vec![PathSprite { bounds }];
 918        }
 919
 920        align_offset(instance_offset);
 921        let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
 922        let next_offset = *instance_offset + sprite_bytes_len;
 923        if next_offset > instance_buffer.size {
 924            return false;
 925        }
 926
 927        command_encoder.set_vertex_buffer(
 928            SpriteInputIndex::Sprites as u64,
 929            Some(&instance_buffer.metal_buffer),
 930            *instance_offset as u64,
 931        );
 932
 933        let buffer_contents =
 934            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
 935        unsafe {
 936            ptr::copy_nonoverlapping(
 937                sprites.as_ptr() as *const u8,
 938                buffer_contents,
 939                sprite_bytes_len,
 940            );
 941        }
 942
 943        command_encoder.draw_primitives_instanced(
 944            metal::MTLPrimitiveType::Triangle,
 945            0,
 946            6,
 947            sprites.len() as u64,
 948        );
 949        *instance_offset = next_offset;
 950
 951        true
 952    }
 953
 954    fn draw_underlines(
 955        &self,
 956        underlines: &[Underline],
 957        instance_buffer: &mut InstanceBuffer,
 958        instance_offset: &mut usize,
 959        viewport_size: Size<DevicePixels>,
 960        command_encoder: &metal::RenderCommandEncoderRef,
 961    ) -> bool {
 962        if underlines.is_empty() {
 963            return true;
 964        }
 965        align_offset(instance_offset);
 966
 967        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
 968        command_encoder.set_vertex_buffer(
 969            UnderlineInputIndex::Vertices as u64,
 970            Some(&self.unit_vertices),
 971            0,
 972        );
 973        command_encoder.set_vertex_buffer(
 974            UnderlineInputIndex::Underlines as u64,
 975            Some(&instance_buffer.metal_buffer),
 976            *instance_offset as u64,
 977        );
 978        command_encoder.set_fragment_buffer(
 979            UnderlineInputIndex::Underlines as u64,
 980            Some(&instance_buffer.metal_buffer),
 981            *instance_offset as u64,
 982        );
 983
 984        command_encoder.set_vertex_bytes(
 985            UnderlineInputIndex::ViewportSize as u64,
 986            mem::size_of_val(&viewport_size) as u64,
 987            &viewport_size as *const Size<DevicePixels> as *const _,
 988        );
 989
 990        let underline_bytes_len = mem::size_of_val(underlines);
 991        let buffer_contents =
 992            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
 993
 994        let next_offset = *instance_offset + underline_bytes_len;
 995        if next_offset > instance_buffer.size {
 996            return false;
 997        }
 998
 999        unsafe {
1000            ptr::copy_nonoverlapping(
1001                underlines.as_ptr() as *const u8,
1002                buffer_contents,
1003                underline_bytes_len,
1004            );
1005        }
1006
1007        command_encoder.draw_primitives_instanced(
1008            metal::MTLPrimitiveType::Triangle,
1009            0,
1010            6,
1011            underlines.len() as u64,
1012        );
1013        *instance_offset = next_offset;
1014        true
1015    }
1016
1017    fn draw_monochrome_sprites(
1018        &self,
1019        texture_id: AtlasTextureId,
1020        sprites: &[MonochromeSprite],
1021        instance_buffer: &mut InstanceBuffer,
1022        instance_offset: &mut usize,
1023        viewport_size: Size<DevicePixels>,
1024        command_encoder: &metal::RenderCommandEncoderRef,
1025    ) -> bool {
1026        if sprites.is_empty() {
1027            return true;
1028        }
1029        align_offset(instance_offset);
1030
1031        let sprite_bytes_len = mem::size_of_val(sprites);
1032        let buffer_contents =
1033            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
1034
1035        let next_offset = *instance_offset + sprite_bytes_len;
1036        if next_offset > instance_buffer.size {
1037            return false;
1038        }
1039
1040        let texture = self.sprite_atlas.metal_texture(texture_id);
1041        let texture_size = size(
1042            DevicePixels(texture.width() as i32),
1043            DevicePixels(texture.height() as i32),
1044        );
1045        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
1046        command_encoder.set_vertex_buffer(
1047            SpriteInputIndex::Vertices as u64,
1048            Some(&self.unit_vertices),
1049            0,
1050        );
1051        command_encoder.set_vertex_buffer(
1052            SpriteInputIndex::Sprites as u64,
1053            Some(&instance_buffer.metal_buffer),
1054            *instance_offset as u64,
1055        );
1056        command_encoder.set_vertex_bytes(
1057            SpriteInputIndex::ViewportSize as u64,
1058            mem::size_of_val(&viewport_size) as u64,
1059            &viewport_size as *const Size<DevicePixels> as *const _,
1060        );
1061        command_encoder.set_vertex_bytes(
1062            SpriteInputIndex::AtlasTextureSize as u64,
1063            mem::size_of_val(&texture_size) as u64,
1064            &texture_size as *const Size<DevicePixels> as *const _,
1065        );
1066        command_encoder.set_fragment_buffer(
1067            SpriteInputIndex::Sprites as u64,
1068            Some(&instance_buffer.metal_buffer),
1069            *instance_offset as u64,
1070        );
1071        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
1072
1073        unsafe {
1074            ptr::copy_nonoverlapping(
1075                sprites.as_ptr() as *const u8,
1076                buffer_contents,
1077                sprite_bytes_len,
1078            );
1079        }
1080
1081        command_encoder.draw_primitives_instanced(
1082            metal::MTLPrimitiveType::Triangle,
1083            0,
1084            6,
1085            sprites.len() as u64,
1086        );
1087        *instance_offset = next_offset;
1088        true
1089    }
1090
1091    fn draw_polychrome_sprites(
1092        &self,
1093        texture_id: AtlasTextureId,
1094        sprites: &[PolychromeSprite],
1095        instance_buffer: &mut InstanceBuffer,
1096        instance_offset: &mut usize,
1097        viewport_size: Size<DevicePixels>,
1098        command_encoder: &metal::RenderCommandEncoderRef,
1099    ) -> bool {
1100        if sprites.is_empty() {
1101            return true;
1102        }
1103        align_offset(instance_offset);
1104
1105        let texture = self.sprite_atlas.metal_texture(texture_id);
1106        let texture_size = size(
1107            DevicePixels(texture.width() as i32),
1108            DevicePixels(texture.height() as i32),
1109        );
1110        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
1111        command_encoder.set_vertex_buffer(
1112            SpriteInputIndex::Vertices as u64,
1113            Some(&self.unit_vertices),
1114            0,
1115        );
1116        command_encoder.set_vertex_buffer(
1117            SpriteInputIndex::Sprites as u64,
1118            Some(&instance_buffer.metal_buffer),
1119            *instance_offset as u64,
1120        );
1121        command_encoder.set_vertex_bytes(
1122            SpriteInputIndex::ViewportSize as u64,
1123            mem::size_of_val(&viewport_size) as u64,
1124            &viewport_size as *const Size<DevicePixels> as *const _,
1125        );
1126        command_encoder.set_vertex_bytes(
1127            SpriteInputIndex::AtlasTextureSize as u64,
1128            mem::size_of_val(&texture_size) as u64,
1129            &texture_size as *const Size<DevicePixels> as *const _,
1130        );
1131        command_encoder.set_fragment_buffer(
1132            SpriteInputIndex::Sprites as u64,
1133            Some(&instance_buffer.metal_buffer),
1134            *instance_offset as u64,
1135        );
1136        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
1137
1138        let sprite_bytes_len = mem::size_of_val(sprites);
1139        let buffer_contents =
1140            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
1141
1142        let next_offset = *instance_offset + sprite_bytes_len;
1143        if next_offset > instance_buffer.size {
1144            return false;
1145        }
1146
1147        unsafe {
1148            ptr::copy_nonoverlapping(
1149                sprites.as_ptr() as *const u8,
1150                buffer_contents,
1151                sprite_bytes_len,
1152            );
1153        }
1154
1155        command_encoder.draw_primitives_instanced(
1156            metal::MTLPrimitiveType::Triangle,
1157            0,
1158            6,
1159            sprites.len() as u64,
1160        );
1161        *instance_offset = next_offset;
1162        true
1163    }
1164
1165    fn draw_surfaces(
1166        &mut self,
1167        surfaces: &[PaintSurface],
1168        instance_buffer: &mut InstanceBuffer,
1169        instance_offset: &mut usize,
1170        viewport_size: Size<DevicePixels>,
1171        command_encoder: &metal::RenderCommandEncoderRef,
1172    ) -> bool {
1173        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
1174        command_encoder.set_vertex_buffer(
1175            SurfaceInputIndex::Vertices as u64,
1176            Some(&self.unit_vertices),
1177            0,
1178        );
1179        command_encoder.set_vertex_bytes(
1180            SurfaceInputIndex::ViewportSize as u64,
1181            mem::size_of_val(&viewport_size) as u64,
1182            &viewport_size as *const Size<DevicePixels> as *const _,
1183        );
1184
1185        for surface in surfaces {
1186            let texture_size = size(
1187                DevicePixels::from(surface.image_buffer.get_width() as i32),
1188                DevicePixels::from(surface.image_buffer.get_height() as i32),
1189            );
1190
1191            assert_eq!(
1192                surface.image_buffer.get_pixel_format(),
1193                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
1194            );
1195
1196            let y_texture = self
1197                .core_video_texture_cache
1198                .create_texture_from_image(
1199                    surface.image_buffer.as_concrete_TypeRef(),
1200                    None,
1201                    MTLPixelFormat::R8Unorm,
1202                    surface.image_buffer.get_width_of_plane(0),
1203                    surface.image_buffer.get_height_of_plane(0),
1204                    0,
1205                )
1206                .unwrap();
1207            let cb_cr_texture = self
1208                .core_video_texture_cache
1209                .create_texture_from_image(
1210                    surface.image_buffer.as_concrete_TypeRef(),
1211                    None,
1212                    MTLPixelFormat::RG8Unorm,
1213                    surface.image_buffer.get_width_of_plane(1),
1214                    surface.image_buffer.get_height_of_plane(1),
1215                    1,
1216                )
1217                .unwrap();
1218
1219            align_offset(instance_offset);
1220            let next_offset = *instance_offset + mem::size_of::<Surface>();
1221            if next_offset > instance_buffer.size {
1222                return false;
1223            }
1224
1225            command_encoder.set_vertex_buffer(
1226                SurfaceInputIndex::Surfaces as u64,
1227                Some(&instance_buffer.metal_buffer),
1228                *instance_offset as u64,
1229            );
1230            command_encoder.set_vertex_bytes(
1231                SurfaceInputIndex::TextureSize as u64,
1232                mem::size_of_val(&texture_size) as u64,
1233                &texture_size as *const Size<DevicePixels> as *const _,
1234            );
1235            // let y_texture = y_texture.get_texture().unwrap().
1236            command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
1237                let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
1238                Some(metal::TextureRef::from_ptr(texture as *mut _))
1239            });
1240            command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
1241                let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
1242                Some(metal::TextureRef::from_ptr(texture as *mut _))
1243            });
1244
1245            unsafe {
1246                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
1247                    .add(*instance_offset)
1248                    as *mut SurfaceBounds;
1249                ptr::write(
1250                    buffer_contents,
1251                    SurfaceBounds {
1252                        bounds: surface.bounds,
1253                        content_mask: surface.content_mask.clone(),
1254                    },
1255                );
1256            }
1257
1258            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
1259            *instance_offset = next_offset;
1260        }
1261        true
1262    }
1263}
1264
1265fn new_command_encoder<'a>(
1266    command_buffer: &'a metal::CommandBufferRef,
1267    drawable: &'a metal::MetalDrawableRef,
1268    viewport_size: Size<DevicePixels>,
1269    configure_color_attachment: impl Fn(&RenderPassColorAttachmentDescriptorRef),
1270) -> &'a metal::RenderCommandEncoderRef {
1271    let render_pass_descriptor = metal::RenderPassDescriptor::new();
1272    let color_attachment = render_pass_descriptor
1273        .color_attachments()
1274        .object_at(0)
1275        .unwrap();
1276    color_attachment.set_texture(Some(drawable.texture()));
1277    color_attachment.set_store_action(metal::MTLStoreAction::Store);
1278    configure_color_attachment(color_attachment);
1279
1280    let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
1281    command_encoder.set_viewport(metal::MTLViewport {
1282        originX: 0.0,
1283        originY: 0.0,
1284        width: i32::from(viewport_size.width) as f64,
1285        height: i32::from(viewport_size.height) as f64,
1286        znear: 0.0,
1287        zfar: 1.0,
1288    });
1289    command_encoder
1290}
1291
1292fn build_pipeline_state(
1293    device: &metal::DeviceRef,
1294    library: &metal::LibraryRef,
1295    label: &str,
1296    vertex_fn_name: &str,
1297    fragment_fn_name: &str,
1298    pixel_format: metal::MTLPixelFormat,
1299) -> metal::RenderPipelineState {
1300    let vertex_fn = library
1301        .get_function(vertex_fn_name, None)
1302        .expect("error locating vertex function");
1303    let fragment_fn = library
1304        .get_function(fragment_fn_name, None)
1305        .expect("error locating fragment function");
1306
1307    let descriptor = metal::RenderPipelineDescriptor::new();
1308    descriptor.set_label(label);
1309    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1310    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1311    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1312    color_attachment.set_pixel_format(pixel_format);
1313    color_attachment.set_blending_enabled(true);
1314    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1315    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1316    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
1317    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1318    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1319    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1320
1321    device
1322        .new_render_pipeline_state(&descriptor)
1323        .expect("could not create render pipeline state")
1324}
1325
1326fn build_path_sprite_pipeline_state(
1327    device: &metal::DeviceRef,
1328    library: &metal::LibraryRef,
1329    label: &str,
1330    vertex_fn_name: &str,
1331    fragment_fn_name: &str,
1332    pixel_format: metal::MTLPixelFormat,
1333) -> metal::RenderPipelineState {
1334    let vertex_fn = library
1335        .get_function(vertex_fn_name, None)
1336        .expect("error locating vertex function");
1337    let fragment_fn = library
1338        .get_function(fragment_fn_name, None)
1339        .expect("error locating fragment function");
1340
1341    let descriptor = metal::RenderPipelineDescriptor::new();
1342    descriptor.set_label(label);
1343    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1344    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1345    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1346    color_attachment.set_pixel_format(pixel_format);
1347    color_attachment.set_blending_enabled(true);
1348    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1349    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1350    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
1351    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1352    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1353    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1354
1355    device
1356        .new_render_pipeline_state(&descriptor)
1357        .expect("could not create render pipeline state")
1358}
1359
1360fn build_path_rasterization_pipeline_state(
1361    device: &metal::DeviceRef,
1362    library: &metal::LibraryRef,
1363    label: &str,
1364    vertex_fn_name: &str,
1365    fragment_fn_name: &str,
1366    pixel_format: metal::MTLPixelFormat,
1367    path_sample_count: u32,
1368) -> metal::RenderPipelineState {
1369    let vertex_fn = library
1370        .get_function(vertex_fn_name, None)
1371        .expect("error locating vertex function");
1372    let fragment_fn = library
1373        .get_function(fragment_fn_name, None)
1374        .expect("error locating fragment function");
1375
1376    let descriptor = metal::RenderPipelineDescriptor::new();
1377    descriptor.set_label(label);
1378    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1379    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1380    if path_sample_count > 1 {
1381        descriptor.set_raster_sample_count(path_sample_count as _);
1382        descriptor.set_alpha_to_coverage_enabled(false);
1383    }
1384    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1385    color_attachment.set_pixel_format(pixel_format);
1386    color_attachment.set_blending_enabled(true);
1387    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1388    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1389    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
1390    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1391    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1392    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1393
1394    device
1395        .new_render_pipeline_state(&descriptor)
1396        .expect("could not create render pipeline state")
1397}
1398
/// Rounds `offset` up to the next multiple of 256, the alignment Metal
/// expects for buffer-binding offsets.
fn align_offset(offset: &mut usize) {
    const ALIGNMENT: usize = 256;
    *offset = offset.div_ceil(ALIGNMENT) * ALIGNMENT;
}
1403
/// Buffer-binding slots used with `set_vertex_buffer`/`set_vertex_bytes`/
/// `set_fragment_buffer` in `draw_shadows`; assumed to mirror the argument
/// indices declared in the Metal shader source — keep in sync.
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}
1410
/// Buffer-binding slots used with `set_vertex_buffer`/`set_vertex_bytes`/
/// `set_fragment_buffer` in `draw_quads`; assumed to mirror the argument
/// indices declared in the Metal shader source — keep in sync.
#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}
1417
/// Buffer-binding slots used with `set_vertex_buffer`/`set_vertex_bytes`/
/// `set_fragment_buffer` in `draw_underlines`; assumed to mirror the argument
/// indices declared in the Metal shader source — keep in sync.
#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}
1424
/// Buffer/texture binding slots shared by the monochrome, polychrome, and
/// path-sprite draw methods; assumed to mirror the argument indices declared
/// in the Metal shader source — keep in sync.
#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    // Texture slot (bound via `set_fragment_texture`), not a buffer slot.
    AtlasTexture = 4,
}
1433
/// Buffer/texture binding slots used by `draw_surfaces`; assumed to mirror
/// the argument indices declared in the Metal shader source — keep in sync.
#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    // Texture slots (bound via `set_fragment_texture`): luminance plane and
    // interleaved chroma plane of the YCbCr pixel buffer.
    YTexture = 4,
    CbCrTexture = 5,
}
1443
/// Buffer-binding slots for the path-rasterization pass; assumed to mirror
/// the argument indices declared in the Metal shader source — keep in sync.
#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    ViewportSize = 1,
}
1449
/// Per-instance data for the path-sprite pass: the region to copy from the
/// intermediate path texture onto the drawable. `#[repr(C)]` because
/// instances are copied byte-for-byte into the GPU instance buffer and read
/// by the shader.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
}
1455
/// Per-instance data written into the instance buffer by `draw_surfaces`:
/// where to draw a video surface and the clip mask to apply. `#[repr(C)]`
/// because the record is written raw (`ptr::write`) into GPU-visible memory
/// and read by the shader.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}