// metal_renderer.rs

   1use super::metal_atlas::MetalAtlas;
   2use crate::{
   3    AtlasTextureId, Background, Bounds, ContentMask, DevicePixels, MonochromeSprite, PaintSurface,
   4    Path, Point, PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size,
   5    Surface, Underline, point, size,
   6};
   7use anyhow::Result;
   8use block::ConcreteBlock;
   9use cocoa::{
  10    base::{NO, YES},
  11    foundation::{NSSize, NSUInteger},
  12    quartzcore::AutoresizingMask,
  13};
  14#[cfg(any(test, feature = "test-support"))]
  15use image::RgbaImage;
  16
  17use core_foundation::base::TCFType;
  18use core_video::{
  19    metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
  20    pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
  21};
  22use foreign_types::{ForeignType, ForeignTypeRef};
  23use metal::{
  24    CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange,
  25    RenderPassColorAttachmentDescriptorRef,
  26};
  27use objc::{self, msg_send, sel, sel_impl};
  28use parking_lot::Mutex;
  29
  30use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};
  31
// Exported to metal
pub(crate) type PointF = crate::Point<f32>;

// Prebuilt shader library produced by the build script (default configuration).
#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
// Stitched shader source compiled at runtime when `runtime_shaders` is enabled
// (avoids a full rebuild while iterating on shaders).
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA, all devices support it.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

// Shared pool of instance buffers handed to the renderer at construction.
pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;
  45
/// Creates the macOS platform renderer.
///
/// The window/view/bounds/transparency parameters exist for signature parity
/// with other platforms' constructors and are unused here (note the leading
/// underscores).
///
/// # Safety
///
/// Declared `unsafe` to match the cross-platform constructor contract; this
/// implementation never dereferences the raw pointers it receives.
pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    _transparent: bool,
) -> Renderer {
    MetalRenderer::new(context)
}
  55
/// Pool of `metal::Buffer`s used to upload per-frame instance data.
///
/// Buffers are recycled across frames while `buffer_size` is unchanged;
/// `reset` (used when growing the buffer size) discards all pooled buffers.
pub(crate) struct InstanceBufferPool {
    // Byte size of every buffer handed out by this pool.
    buffer_size: usize,
    // Buffers returned to the pool, ready for reuse.
    buffers: Vec<metal::Buffer>,
}
  60
  61impl Default for InstanceBufferPool {
  62    fn default() -> Self {
  63        Self {
  64            buffer_size: 2 * 1024 * 1024,
  65            buffers: Vec::new(),
  66        }
  67    }
  68}
  69
/// A GPU buffer checked out of an `InstanceBufferPool`, paired with the byte
/// size it was allocated with so `release` can decide whether it may be
/// returned to the pool.
pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    // Byte capacity of `metal_buffer` at acquisition time.
    size: usize,
}
  74
  75impl InstanceBufferPool {
  76    pub(crate) fn reset(&mut self, buffer_size: usize) {
  77        self.buffer_size = buffer_size;
  78        self.buffers.clear();
  79    }
  80
  81    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
  82        let buffer = self.buffers.pop().unwrap_or_else(|| {
  83            device.new_buffer(
  84                self.buffer_size as u64,
  85                MTLResourceOptions::StorageModeManaged,
  86            )
  87        });
  88        InstanceBuffer {
  89            metal_buffer: buffer,
  90            size: self.buffer_size,
  91        }
  92    }
  93
  94    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
  95        if buffer.size == self.buffer_size {
  96            self.buffers.push(buffer.metal_buffer)
  97        }
  98    }
  99}
 100
/// macOS renderer backed by Metal. Owns the GPU device, the `CAMetalLayer`
/// the window displays, one render pipeline state per primitive kind, and the
/// intermediate textures used for antialiased path rendering.
pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    // When true, present via commit + wait_until_scheduled + present()
    // (inside the CoreAnimation transaction) instead of present_drawable;
    // see `draw`.
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    // One pipeline state per primitive batch kind drawn in `draw_primitives`.
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    // Unit-square vertices shared by all instanced primitive draws.
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    // Cache for wrapping CoreVideo pixel buffers as Metal textures when
    // drawing surfaces.
    core_video_texture_cache: core_video::metal_texture_cache::CVMetalTextureCache,
    // Offscreen target that path batches are rasterized into before being
    // composited onto the drawable; None until a non-zero drawable size is set.
    path_intermediate_texture: Option<metal::Texture>,
    // Multisampled companion of the intermediate texture; None when
    // path_sample_count <= 1.
    path_intermediate_msaa_texture: Option<metal::Texture>,
    path_sample_count: u32,
}
 123
/// Per-vertex data uploaded to the path rasterization shader.
/// `#[repr(C)]` keeps the layout bit-compatible with the Metal-side struct.
#[repr(C)]
pub struct PathRasterizationVertex {
    // Vertex position in scaled pixels.
    pub xy_position: Point<ScaledPixels>,
    // (s, t) coordinates consumed by the fragment shader — presumably
    // curve-space coordinates for fill coverage; confirm against the shader.
    pub st_position: Point<f32>,
    pub color: Background,
    // Path bounds already intersected with the content mask (see
    // `draw_paths_to_intermediate`), used for clipping in the shader.
    pub bounds: Bounds<ScaledPixels>,
}
 131
 132impl MetalRenderer {
    /// Builds the renderer: selects a GPU, configures the `CAMetalLayer`,
    /// loads the shader library, and creates one render pipeline state per
    /// primitive kind.
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>) -> Self {
        // Prefer low‐power integrated GPUs on Intel Mac. On Apple
        // Silicon, there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let device = if let Some(d) = metal::Device::all()
            .into_iter()
            .min_by_key(|d| (d.is_removable(), !d.is_low_power()))
        {
            d
        } else {
            // For some reason `all()` can return an empty list, see https://github.com/zed-industries/zed/issues/37689
            // In that case, we fall back to the system default device.
            log::error!(
                "Unable to enumerate Metal devices; attempting to use system default device"
            );
            metal::Device::system_default().unwrap_or_else(|| {
                // No usable GPU at all: rendering is impossible, so exit.
                log::error!("unable to access a compatible graphics device");
                std::process::exit(1);
            })
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(false);
        // Allow up to three in-flight drawables.
        layer.set_maximum_drawable_count(3);
        // Allow texture reading for visual tests (captures screenshots without ScreenCaptureKit)
        #[cfg(any(test, feature = "test-support"))]
        layer.set_framebuffer_only(false);
        unsafe {
            // Block in `next_drawable` rather than timing out, redraw when the
            // layer's bounds change, and resize with the containing view.
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        // Shaders: compiled from source at runtime (feature `runtime_shaders`,
        // handy for shader iteration) or loaded from the prebuilt metallib.
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        // Packs a point into the bit pattern of a Metal `float2`:
        // x in the low 32 bits, y in the high 32 bits.
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        // Two triangles covering the unit square; instanced draws scale these
        // to each primitive's bounds in the vertex shaders.
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        // One pipeline per primitive kind, all targeting BGRA8Unorm. Path
        // rasterization additionally renders at PATH_SAMPLE_COUNT samples.
        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::BGRA8Unorm,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_path_sprite_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            CVMetalTextureCache::new(None, device.clone(), None).unwrap();

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
            // Created lazily once a non-zero drawable size is known; see
            // `update_path_intermediate_textures`.
            path_intermediate_texture: None,
            path_intermediate_msaa_texture: None,
            path_sample_count: PATH_SAMPLE_COUNT,
        }
    }
 294
    /// The `CAMetalLayer` that should be attached to the window's view.
    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }
 298
    /// Raw pointer to the layer, for handing to Objective-C APIs. The pointer
    /// is valid only while this renderer (which owns the layer) is alive.
    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }
 302
    /// The sprite atlas backed by this renderer's Metal device.
    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }
 306
    /// Toggles CoreAnimation-transaction presentation on both the renderer
    /// and its layer; see `draw` for how this changes commit/present order.
    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }
 312
 313    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
 314        let size = NSSize {
 315            width: size.width.0 as f64,
 316            height: size.height.0 as f64,
 317        };
 318        unsafe {
 319            let _: () = msg_send![
 320                self.layer(),
 321                setDrawableSize: size
 322            ];
 323        }
 324        let device_pixels_size = Size {
 325            width: DevicePixels(size.width as i32),
 326            height: DevicePixels(size.height as i32),
 327        };
 328        self.update_path_intermediate_textures(device_pixels_size);
 329    }
 330
 331    fn update_path_intermediate_textures(&mut self, size: Size<DevicePixels>) {
 332        // We are uncertain when this happens, but sometimes size can be 0 here. Most likely before
 333        // the layout pass on window creation. Zero-sized texture creation causes SIGABRT.
 334        // https://github.com/zed-industries/zed/issues/36229
 335        if size.width.0 <= 0 || size.height.0 <= 0 {
 336            self.path_intermediate_texture = None;
 337            self.path_intermediate_msaa_texture = None;
 338            return;
 339        }
 340
 341        let texture_descriptor = metal::TextureDescriptor::new();
 342        texture_descriptor.set_width(size.width.0 as u64);
 343        texture_descriptor.set_height(size.height.0 as u64);
 344        texture_descriptor.set_pixel_format(metal::MTLPixelFormat::BGRA8Unorm);
 345        texture_descriptor
 346            .set_usage(metal::MTLTextureUsage::RenderTarget | metal::MTLTextureUsage::ShaderRead);
 347        self.path_intermediate_texture = Some(self.device.new_texture(&texture_descriptor));
 348
 349        if self.path_sample_count > 1 {
 350            let mut msaa_descriptor = texture_descriptor;
 351            msaa_descriptor.set_texture_type(metal::MTLTextureType::D2Multisample);
 352            msaa_descriptor.set_storage_mode(metal::MTLStorageMode::Private);
 353            msaa_descriptor.set_sample_count(self.path_sample_count as _);
 354            self.path_intermediate_msaa_texture = Some(self.device.new_texture(&msaa_descriptor));
 355        } else {
 356            self.path_intermediate_msaa_texture = None;
 357        }
 358    }
 359
    /// Transparency changes currently require no work on macOS; the layer is
    /// created non-opaque in `new`.
    pub fn update_transparency(&self, _transparent: bool) {
        // todo(mac)?
    }
 363
    /// No-op: Metal resources are released when this renderer is dropped.
    pub fn destroy(&self) {
        // nothing to do
    }
 367
    /// Renders `scene` into the layer's next drawable and presents it.
    ///
    /// If the scene's instance data doesn't fit in the current buffer, the
    /// pool's buffer size is doubled (capped at 256 MiB) and the frame is
    /// retried from scratch.
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    // Return the instance buffer to the pool only after the
                    // GPU has finished with it. The Cell lets the handler
                    // (which may in principle be invoked more than once)
                    // move the buffer out exactly once.
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        // Present from the CPU once the GPU work is scheduled,
                        // so the present lands inside the current
                        // CoreAnimation transaction.
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    // Give up rather than growing without bound; the frame is
                    // dropped in that case.
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }
 433
    /// Renders the scene to a texture and returns the pixel data as an RGBA image.
    /// This does not present the frame to screen - useful for visual testing
    /// where we want to capture what would be rendered without displaying it.
    ///
    /// Requires `framebuffer_only` to be disabled on the layer (done in `new`
    /// under the same cfg) so the drawable's texture can be read back.
    #[cfg(any(test, feature = "test-support"))]
    pub fn render_to_image(&mut self, scene: &Scene) -> Result<RgbaImage> {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = layer
            .next_drawable()
            .ok_or_else(|| anyhow::anyhow!("Failed to get drawable for render_to_image"))?;

        // Same grow-and-retry loop as `draw`, but waits for completion and
        // reads pixels back instead of presenting.
        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    // Recycle the instance buffer once the GPU is done with it.
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    // Commit and wait for completion without presenting
                    command_buffer.commit();
                    command_buffer.wait_until_completed();

                    // Read pixels from the texture
                    let texture = drawable.texture();
                    let width = texture.width() as u32;
                    let height = texture.height() as u32;
                    // BGRA8Unorm: 4 bytes per pixel, tightly packed rows.
                    let bytes_per_row = width as usize * 4;
                    let buffer_size = height as usize * bytes_per_row;

                    let mut pixels = vec![0u8; buffer_size];

                    let region = metal::MTLRegion {
                        origin: metal::MTLOrigin { x: 0, y: 0, z: 0 },
                        size: metal::MTLSize {
                            width: width as u64,
                            height: height as u64,
                            depth: 1,
                        },
                    };

                    texture.get_bytes(
                        pixels.as_mut_ptr() as *mut std::ffi::c_void,
                        bytes_per_row as u64,
                        region,
                        0,
                    );

                    // Convert BGRA to RGBA (swap B and R channels)
                    for chunk in pixels.chunks_exact_mut(4) {
                        chunk.swap(0, 2);
                    }

                    return RgbaImage::from_raw(width, height, pixels).ok_or_else(|| {
                        anyhow::anyhow!("Failed to create RgbaImage from pixel data")
                    });
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        anyhow::bail!("instance buffer size grew too large: {}", buffer_size);
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }
 524
    /// Encodes draw calls for every batch in `scene` into a new command
    /// buffer, writing instance data into `instance_buffer`.
    ///
    /// Path batches need their own offscreen render pass, so the main pass is
    /// ended before them and restarted afterwards with `Load` so the already
    /// drawn contents are preserved.
    ///
    /// Returns `Err` when any batch fails to fit in the instance buffer; the
    /// caller retries with a larger buffer.
    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        // Clear to transparent black for non-opaque layers so the window
        // background shows through; opaque black otherwise.
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        let mut instance_offset = 0;

        let mut command_encoder = new_command_encoder(
            command_buffer,
            drawable,
            viewport_size,
            |color_attachment| {
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
            },
        );

        // Each draw_* helper returns false when its data doesn't fit in the
        // instance buffer.
        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => {
                    // Paths render in a separate pass targeting the
                    // intermediate texture, so suspend the main pass.
                    command_encoder.end_encoding();

                    let did_draw = self.draw_paths_to_intermediate(
                        paths,
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_buffer,
                    );

                    // Resume the main pass, loading (not clearing) what was
                    // already drawn to the drawable.
                    command_encoder = new_command_encoder(
                        command_buffer,
                        drawable,
                        viewport_size,
                        |color_attachment| {
                            color_attachment.set_load_action(metal::MTLLoadAction::Load);
                        },
                    );

                    if did_draw {
                        // Composite the rasterized paths onto the drawable.
                        self.draw_paths_from_intermediate(
                            paths,
                            instance_buffer,
                            &mut instance_offset,
                            viewport_size,
                            command_encoder,
                        )
                    } else {
                        false
                    }
                }
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };
            if !ok {
                command_encoder.end_encoding();
                anyhow::bail!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
            }
        }

        command_encoder.end_encoding();

        // The instance buffer uses managed storage: tell Metal which byte
        // range the CPU wrote so it gets synchronized to the GPU.
        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }
 655
    /// Rasterizes `paths` into the intermediate offscreen texture (through
    /// the MSAA target with a multisample resolve, when enabled) in its own
    /// render pass on `command_buffer`.
    ///
    /// Returns false when the intermediate texture is missing (zero-sized
    /// drawable) or the vertex data doesn't fit in the instance buffer.
    fn draw_paths_to_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_buffer: &metal::CommandBufferRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }
        let Some(intermediate_texture) = &self.path_intermediate_texture else {
            return false;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();
        // Start from fully transparent; the composite pass blends the result
        // onto the drawable.
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 0.));

        if let Some(msaa_texture) = &self.path_intermediate_msaa_texture {
            // Render multisampled, then resolve into the readable texture.
            color_attachment.set_texture(Some(msaa_texture));
            color_attachment.set_resolve_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
        } else {
            color_attachment.set_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
        }

        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
        command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);

        align_offset(instance_offset);
        // Flatten all paths' vertices into one upload, clipping each path's
        // bounds by its content mask.
        let mut vertices = Vec::new();
        for path in paths {
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds: path.bounds.intersect(&path.content_mask.bounds),
            }));
        }
        let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
        let next_offset = *instance_offset + vertices_bytes_len;
        if next_offset > instance_buffer.size {
            // Signal the caller to retry with a larger instance buffer.
            command_encoder.end_encoding();
            return false;
        }
        command_encoder.set_vertex_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            PathRasterizationInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        // SAFETY: `next_offset <= instance_buffer.size` was verified above,
        // so [contents + instance_offset, contents + next_offset) lies within
        // the buffer's allocation.
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                vertices.as_ptr() as *const u8,
                buffer_contents,
                vertices_bytes_len,
            );
        }
        command_encoder.draw_primitives(
            metal::MTLPrimitiveType::Triangle,
            0,
            vertices.len() as u64,
        );
        *instance_offset = next_offset;

        command_encoder.end_encoding();
        true
    }
 741
 742    fn draw_shadows(
 743        &self,
 744        shadows: &[Shadow],
 745        instance_buffer: &mut InstanceBuffer,
 746        instance_offset: &mut usize,
 747        viewport_size: Size<DevicePixels>,
 748        command_encoder: &metal::RenderCommandEncoderRef,
 749    ) -> bool {
 750        if shadows.is_empty() {
 751            return true;
 752        }
 753        align_offset(instance_offset);
 754
 755        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
 756        command_encoder.set_vertex_buffer(
 757            ShadowInputIndex::Vertices as u64,
 758            Some(&self.unit_vertices),
 759            0,
 760        );
 761        command_encoder.set_vertex_buffer(
 762            ShadowInputIndex::Shadows as u64,
 763            Some(&instance_buffer.metal_buffer),
 764            *instance_offset as u64,
 765        );
 766        command_encoder.set_fragment_buffer(
 767            ShadowInputIndex::Shadows as u64,
 768            Some(&instance_buffer.metal_buffer),
 769            *instance_offset as u64,
 770        );
 771
 772        command_encoder.set_vertex_bytes(
 773            ShadowInputIndex::ViewportSize as u64,
 774            mem::size_of_val(&viewport_size) as u64,
 775            &viewport_size as *const Size<DevicePixels> as *const _,
 776        );
 777
 778        let shadow_bytes_len = mem::size_of_val(shadows);
 779        let buffer_contents =
 780            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
 781
 782        let next_offset = *instance_offset + shadow_bytes_len;
 783        if next_offset > instance_buffer.size {
 784            return false;
 785        }
 786
 787        unsafe {
 788            ptr::copy_nonoverlapping(
 789                shadows.as_ptr() as *const u8,
 790                buffer_contents,
 791                shadow_bytes_len,
 792            );
 793        }
 794
 795        command_encoder.draw_primitives_instanced(
 796            metal::MTLPrimitiveType::Triangle,
 797            0,
 798            6,
 799            shadows.len() as u64,
 800        );
 801        *instance_offset = next_offset;
 802        true
 803    }
 804
 805    fn draw_quads(
 806        &self,
 807        quads: &[Quad],
 808        instance_buffer: &mut InstanceBuffer,
 809        instance_offset: &mut usize,
 810        viewport_size: Size<DevicePixels>,
 811        command_encoder: &metal::RenderCommandEncoderRef,
 812    ) -> bool {
 813        if quads.is_empty() {
 814            return true;
 815        }
 816        align_offset(instance_offset);
 817
 818        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
 819        command_encoder.set_vertex_buffer(
 820            QuadInputIndex::Vertices as u64,
 821            Some(&self.unit_vertices),
 822            0,
 823        );
 824        command_encoder.set_vertex_buffer(
 825            QuadInputIndex::Quads as u64,
 826            Some(&instance_buffer.metal_buffer),
 827            *instance_offset as u64,
 828        );
 829        command_encoder.set_fragment_buffer(
 830            QuadInputIndex::Quads as u64,
 831            Some(&instance_buffer.metal_buffer),
 832            *instance_offset as u64,
 833        );
 834
 835        command_encoder.set_vertex_bytes(
 836            QuadInputIndex::ViewportSize as u64,
 837            mem::size_of_val(&viewport_size) as u64,
 838            &viewport_size as *const Size<DevicePixels> as *const _,
 839        );
 840
 841        let quad_bytes_len = mem::size_of_val(quads);
 842        let buffer_contents =
 843            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
 844
 845        let next_offset = *instance_offset + quad_bytes_len;
 846        if next_offset > instance_buffer.size {
 847            return false;
 848        }
 849
 850        unsafe {
 851            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
 852        }
 853
 854        command_encoder.draw_primitives_instanced(
 855            metal::MTLPrimitiveType::Triangle,
 856            0,
 857            6,
 858            quads.len() as u64,
 859        );
 860        *instance_offset = next_offset;
 861        true
 862    }
 863
 864    fn draw_paths_from_intermediate(
 865        &self,
 866        paths: &[Path<ScaledPixels>],
 867        instance_buffer: &mut InstanceBuffer,
 868        instance_offset: &mut usize,
 869        viewport_size: Size<DevicePixels>,
 870        command_encoder: &metal::RenderCommandEncoderRef,
 871    ) -> bool {
 872        let Some(first_path) = paths.first() else {
 873            return true;
 874        };
 875
 876        let Some(ref intermediate_texture) = self.path_intermediate_texture else {
 877            return false;
 878        };
 879
 880        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
 881        command_encoder.set_vertex_buffer(
 882            SpriteInputIndex::Vertices as u64,
 883            Some(&self.unit_vertices),
 884            0,
 885        );
 886        command_encoder.set_vertex_bytes(
 887            SpriteInputIndex::ViewportSize as u64,
 888            mem::size_of_val(&viewport_size) as u64,
 889            &viewport_size as *const Size<DevicePixels> as *const _,
 890        );
 891
 892        command_encoder.set_fragment_texture(
 893            SpriteInputIndex::AtlasTexture as u64,
 894            Some(intermediate_texture),
 895        );
 896
 897        // When copying paths from the intermediate texture to the drawable,
 898        // each pixel must only be copied once, in case of transparent paths.
 899        //
 900        // If all paths have the same draw order, then their bounds are all
 901        // disjoint, so we can copy each path's bounds individually. If this
 902        // batch combines different draw orders, we perform a single copy
 903        // for a minimal spanning rect.
 904        let sprites;
 905        if paths.last().unwrap().order == first_path.order {
 906            sprites = paths
 907                .iter()
 908                .map(|path| PathSprite {
 909                    bounds: path.clipped_bounds(),
 910                })
 911                .collect();
 912        } else {
 913            let mut bounds = first_path.clipped_bounds();
 914            for path in paths.iter().skip(1) {
 915                bounds = bounds.union(&path.clipped_bounds());
 916            }
 917            sprites = vec![PathSprite { bounds }];
 918        }
 919
 920        align_offset(instance_offset);
 921        let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
 922        let next_offset = *instance_offset + sprite_bytes_len;
 923        if next_offset > instance_buffer.size {
 924            return false;
 925        }
 926
 927        command_encoder.set_vertex_buffer(
 928            SpriteInputIndex::Sprites as u64,
 929            Some(&instance_buffer.metal_buffer),
 930            *instance_offset as u64,
 931        );
 932
 933        let buffer_contents =
 934            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
 935        unsafe {
 936            ptr::copy_nonoverlapping(
 937                sprites.as_ptr() as *const u8,
 938                buffer_contents,
 939                sprite_bytes_len,
 940            );
 941        }
 942
 943        command_encoder.draw_primitives_instanced(
 944            metal::MTLPrimitiveType::Triangle,
 945            0,
 946            6,
 947            sprites.len() as u64,
 948        );
 949        *instance_offset = next_offset;
 950
 951        true
 952    }
 953
 954    fn draw_underlines(
 955        &self,
 956        underlines: &[Underline],
 957        instance_buffer: &mut InstanceBuffer,
 958        instance_offset: &mut usize,
 959        viewport_size: Size<DevicePixels>,
 960        command_encoder: &metal::RenderCommandEncoderRef,
 961    ) -> bool {
 962        if underlines.is_empty() {
 963            return true;
 964        }
 965        align_offset(instance_offset);
 966
 967        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
 968        command_encoder.set_vertex_buffer(
 969            UnderlineInputIndex::Vertices as u64,
 970            Some(&self.unit_vertices),
 971            0,
 972        );
 973        command_encoder.set_vertex_buffer(
 974            UnderlineInputIndex::Underlines as u64,
 975            Some(&instance_buffer.metal_buffer),
 976            *instance_offset as u64,
 977        );
 978        command_encoder.set_fragment_buffer(
 979            UnderlineInputIndex::Underlines as u64,
 980            Some(&instance_buffer.metal_buffer),
 981            *instance_offset as u64,
 982        );
 983
 984        command_encoder.set_vertex_bytes(
 985            UnderlineInputIndex::ViewportSize as u64,
 986            mem::size_of_val(&viewport_size) as u64,
 987            &viewport_size as *const Size<DevicePixels> as *const _,
 988        );
 989
 990        let underline_bytes_len = mem::size_of_val(underlines);
 991        let buffer_contents =
 992            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
 993
 994        let next_offset = *instance_offset + underline_bytes_len;
 995        if next_offset > instance_buffer.size {
 996            return false;
 997        }
 998
 999        unsafe {
1000            ptr::copy_nonoverlapping(
1001                underlines.as_ptr() as *const u8,
1002                buffer_contents,
1003                underline_bytes_len,
1004            );
1005        }
1006
1007        command_encoder.draw_primitives_instanced(
1008            metal::MTLPrimitiveType::Triangle,
1009            0,
1010            6,
1011            underlines.len() as u64,
1012        );
1013        *instance_offset = next_offset;
1014        true
1015    }
1016
1017    fn draw_monochrome_sprites(
1018        &self,
1019        texture_id: AtlasTextureId,
1020        sprites: &[MonochromeSprite],
1021        instance_buffer: &mut InstanceBuffer,
1022        instance_offset: &mut usize,
1023        viewport_size: Size<DevicePixels>,
1024        command_encoder: &metal::RenderCommandEncoderRef,
1025    ) -> bool {
1026        if sprites.is_empty() {
1027            return true;
1028        }
1029        align_offset(instance_offset);
1030
1031        let sprite_bytes_len = mem::size_of_val(sprites);
1032        let buffer_contents =
1033            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
1034
1035        let next_offset = *instance_offset + sprite_bytes_len;
1036        if next_offset > instance_buffer.size {
1037            return false;
1038        }
1039
1040        let texture = self.sprite_atlas.metal_texture(texture_id);
1041        let texture_size = size(
1042            DevicePixels(texture.width() as i32),
1043            DevicePixels(texture.height() as i32),
1044        );
1045        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
1046        command_encoder.set_vertex_buffer(
1047            SpriteInputIndex::Vertices as u64,
1048            Some(&self.unit_vertices),
1049            0,
1050        );
1051        command_encoder.set_vertex_buffer(
1052            SpriteInputIndex::Sprites as u64,
1053            Some(&instance_buffer.metal_buffer),
1054            *instance_offset as u64,
1055        );
1056        command_encoder.set_vertex_bytes(
1057            SpriteInputIndex::ViewportSize as u64,
1058            mem::size_of_val(&viewport_size) as u64,
1059            &viewport_size as *const Size<DevicePixels> as *const _,
1060        );
1061        command_encoder.set_vertex_bytes(
1062            SpriteInputIndex::AtlasTextureSize as u64,
1063            mem::size_of_val(&texture_size) as u64,
1064            &texture_size as *const Size<DevicePixels> as *const _,
1065        );
1066        command_encoder.set_fragment_buffer(
1067            SpriteInputIndex::Sprites as u64,
1068            Some(&instance_buffer.metal_buffer),
1069            *instance_offset as u64,
1070        );
1071        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
1072
1073        unsafe {
1074            ptr::copy_nonoverlapping(
1075                sprites.as_ptr() as *const u8,
1076                buffer_contents,
1077                sprite_bytes_len,
1078            );
1079        }
1080
1081        command_encoder.draw_primitives_instanced(
1082            metal::MTLPrimitiveType::Triangle,
1083            0,
1084            6,
1085            sprites.len() as u64,
1086        );
1087        *instance_offset = next_offset;
1088        true
1089    }
1090
1091    fn draw_polychrome_sprites(
1092        &self,
1093        texture_id: AtlasTextureId,
1094        sprites: &[PolychromeSprite],
1095        instance_buffer: &mut InstanceBuffer,
1096        instance_offset: &mut usize,
1097        viewport_size: Size<DevicePixels>,
1098        command_encoder: &metal::RenderCommandEncoderRef,
1099    ) -> bool {
1100        if sprites.is_empty() {
1101            return true;
1102        }
1103        align_offset(instance_offset);
1104
1105        let texture = self.sprite_atlas.metal_texture(texture_id);
1106        let texture_size = size(
1107            DevicePixels(texture.width() as i32),
1108            DevicePixels(texture.height() as i32),
1109        );
1110        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
1111        command_encoder.set_vertex_buffer(
1112            SpriteInputIndex::Vertices as u64,
1113            Some(&self.unit_vertices),
1114            0,
1115        );
1116        command_encoder.set_vertex_buffer(
1117            SpriteInputIndex::Sprites as u64,
1118            Some(&instance_buffer.metal_buffer),
1119            *instance_offset as u64,
1120        );
1121        command_encoder.set_vertex_bytes(
1122            SpriteInputIndex::ViewportSize as u64,
1123            mem::size_of_val(&viewport_size) as u64,
1124            &viewport_size as *const Size<DevicePixels> as *const _,
1125        );
1126        command_encoder.set_vertex_bytes(
1127            SpriteInputIndex::AtlasTextureSize as u64,
1128            mem::size_of_val(&texture_size) as u64,
1129            &texture_size as *const Size<DevicePixels> as *const _,
1130        );
1131        command_encoder.set_fragment_buffer(
1132            SpriteInputIndex::Sprites as u64,
1133            Some(&instance_buffer.metal_buffer),
1134            *instance_offset as u64,
1135        );
1136        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
1137
1138        let sprite_bytes_len = mem::size_of_val(sprites);
1139        let buffer_contents =
1140            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
1141
1142        let next_offset = *instance_offset + sprite_bytes_len;
1143        if next_offset > instance_buffer.size {
1144            return false;
1145        }
1146
1147        unsafe {
1148            ptr::copy_nonoverlapping(
1149                sprites.as_ptr() as *const u8,
1150                buffer_contents,
1151                sprite_bytes_len,
1152            );
1153        }
1154
1155        command_encoder.draw_primitives_instanced(
1156            metal::MTLPrimitiveType::Triangle,
1157            0,
1158            6,
1159            sprites.len() as u64,
1160        );
1161        *instance_offset = next_offset;
1162        true
1163    }
1164
1165    fn draw_surfaces(
1166        &mut self,
1167        surfaces: &[PaintSurface],
1168        instance_buffer: &mut InstanceBuffer,
1169        instance_offset: &mut usize,
1170        viewport_size: Size<DevicePixels>,
1171        command_encoder: &metal::RenderCommandEncoderRef,
1172    ) -> bool {
1173        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
1174        command_encoder.set_vertex_buffer(
1175            SurfaceInputIndex::Vertices as u64,
1176            Some(&self.unit_vertices),
1177            0,
1178        );
1179        command_encoder.set_vertex_bytes(
1180            SurfaceInputIndex::ViewportSize as u64,
1181            mem::size_of_val(&viewport_size) as u64,
1182            &viewport_size as *const Size<DevicePixels> as *const _,
1183        );
1184
1185        for surface in surfaces {
1186            let texture_size = size(
1187                DevicePixels::from(surface.image_buffer.get_width() as i32),
1188                DevicePixels::from(surface.image_buffer.get_height() as i32),
1189            );
1190
1191            assert_eq!(
1192                surface.image_buffer.get_pixel_format(),
1193                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
1194            );
1195
1196            let y_texture = self
1197                .core_video_texture_cache
1198                .create_texture_from_image(
1199                    surface.image_buffer.as_concrete_TypeRef(),
1200                    None,
1201                    MTLPixelFormat::R8Unorm,
1202                    surface.image_buffer.get_width_of_plane(0),
1203                    surface.image_buffer.get_height_of_plane(0),
1204                    0,
1205                )
1206                .unwrap();
1207            let cb_cr_texture = self
1208                .core_video_texture_cache
1209                .create_texture_from_image(
1210                    surface.image_buffer.as_concrete_TypeRef(),
1211                    None,
1212                    MTLPixelFormat::RG8Unorm,
1213                    surface.image_buffer.get_width_of_plane(1),
1214                    surface.image_buffer.get_height_of_plane(1),
1215                    1,
1216                )
1217                .unwrap();
1218
1219            align_offset(instance_offset);
1220            let next_offset = *instance_offset + mem::size_of::<Surface>();
1221            if next_offset > instance_buffer.size {
1222                return false;
1223            }
1224
1225            command_encoder.set_vertex_buffer(
1226                SurfaceInputIndex::Surfaces as u64,
1227                Some(&instance_buffer.metal_buffer),
1228                *instance_offset as u64,
1229            );
1230            command_encoder.set_vertex_bytes(
1231                SurfaceInputIndex::TextureSize as u64,
1232                mem::size_of_val(&texture_size) as u64,
1233                &texture_size as *const Size<DevicePixels> as *const _,
1234            );
1235            // let y_texture = y_texture.get_texture().unwrap().
1236            command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
1237                let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
1238                Some(metal::TextureRef::from_ptr(texture as *mut _))
1239            });
1240            command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
1241                let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
1242                Some(metal::TextureRef::from_ptr(texture as *mut _))
1243            });
1244
1245            unsafe {
1246                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
1247                    .add(*instance_offset)
1248                    as *mut SurfaceBounds;
1249                ptr::write(
1250                    buffer_contents,
1251                    SurfaceBounds {
1252                        bounds: surface.bounds,
1253                        content_mask: surface.content_mask.clone(),
1254                    },
1255                );
1256            }
1257
1258            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
1259            *instance_offset = next_offset;
1260        }
1261        true
1262    }
1263}
1264
1265fn new_command_encoder<'a>(
1266    command_buffer: &'a metal::CommandBufferRef,
1267    drawable: &'a metal::MetalDrawableRef,
1268    viewport_size: Size<DevicePixels>,
1269    configure_color_attachment: impl Fn(&RenderPassColorAttachmentDescriptorRef),
1270) -> &'a metal::RenderCommandEncoderRef {
1271    let render_pass_descriptor = metal::RenderPassDescriptor::new();
1272    let color_attachment = render_pass_descriptor
1273        .color_attachments()
1274        .object_at(0)
1275        .unwrap();
1276    color_attachment.set_texture(Some(drawable.texture()));
1277    color_attachment.set_store_action(metal::MTLStoreAction::Store);
1278    configure_color_attachment(color_attachment);
1279
1280    let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
1281    command_encoder.set_viewport(metal::MTLViewport {
1282        originX: 0.0,
1283        originY: 0.0,
1284        width: i32::from(viewport_size.width) as f64,
1285        height: i32::from(viewport_size.height) as f64,
1286        znear: 0.0,
1287        zfar: 1.0,
1288    });
1289    command_encoder
1290}
1291
1292fn build_pipeline_state(
1293    device: &metal::DeviceRef,
1294    library: &metal::LibraryRef,
1295    label: &str,
1296    vertex_fn_name: &str,
1297    fragment_fn_name: &str,
1298    pixel_format: metal::MTLPixelFormat,
1299) -> metal::RenderPipelineState {
1300    let vertex_fn = library
1301        .get_function(vertex_fn_name, None)
1302        .expect("error locating vertex function");
1303    let fragment_fn = library
1304        .get_function(fragment_fn_name, None)
1305        .expect("error locating fragment function");
1306
1307    let descriptor = metal::RenderPipelineDescriptor::new();
1308    descriptor.set_label(label);
1309    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1310    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1311    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1312    color_attachment.set_pixel_format(pixel_format);
1313    color_attachment.set_blending_enabled(true);
1314    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1315    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1316    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
1317    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1318    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1319    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1320
1321    device
1322        .new_render_pipeline_state(&descriptor)
1323        .expect("could not create render pipeline state")
1324}
1325
1326fn build_path_sprite_pipeline_state(
1327    device: &metal::DeviceRef,
1328    library: &metal::LibraryRef,
1329    label: &str,
1330    vertex_fn_name: &str,
1331    fragment_fn_name: &str,
1332    pixel_format: metal::MTLPixelFormat,
1333) -> metal::RenderPipelineState {
1334    let vertex_fn = library
1335        .get_function(vertex_fn_name, None)
1336        .expect("error locating vertex function");
1337    let fragment_fn = library
1338        .get_function(fragment_fn_name, None)
1339        .expect("error locating fragment function");
1340
1341    let descriptor = metal::RenderPipelineDescriptor::new();
1342    descriptor.set_label(label);
1343    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1344    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1345    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1346    color_attachment.set_pixel_format(pixel_format);
1347    color_attachment.set_blending_enabled(true);
1348    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1349    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1350    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
1351    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1352    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1353    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
1354
1355    device
1356        .new_render_pipeline_state(&descriptor)
1357        .expect("could not create render pipeline state")
1358}
1359
1360fn build_path_rasterization_pipeline_state(
1361    device: &metal::DeviceRef,
1362    library: &metal::LibraryRef,
1363    label: &str,
1364    vertex_fn_name: &str,
1365    fragment_fn_name: &str,
1366    pixel_format: metal::MTLPixelFormat,
1367    path_sample_count: u32,
1368) -> metal::RenderPipelineState {
1369    let vertex_fn = library
1370        .get_function(vertex_fn_name, None)
1371        .expect("error locating vertex function");
1372    let fragment_fn = library
1373        .get_function(fragment_fn_name, None)
1374        .expect("error locating fragment function");
1375
1376    let descriptor = metal::RenderPipelineDescriptor::new();
1377    descriptor.set_label(label);
1378    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
1379    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
1380    if path_sample_count > 1 {
1381        descriptor.set_raster_sample_count(path_sample_count as _);
1382        descriptor.set_alpha_to_coverage_enabled(false);
1383    }
1384    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
1385    color_attachment.set_pixel_format(pixel_format);
1386    color_attachment.set_blending_enabled(true);
1387    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
1388    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
1389    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
1390    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
1391    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1392    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
1393
1394    device
1395        .new_render_pipeline_state(&descriptor)
1396        .expect("could not create render pipeline state")
1397}
1398
/// Rounds `offset` up to the next multiple of 256 — Metal requires buffer
/// binding offsets to be 256-byte aligned.
fn align_offset(offset: &mut usize) {
    *offset = (*offset).next_multiple_of(256);
}
1403
/// Argument slots used to bind shadow inputs on the render encoder in
/// `draw_shadows`. The discriminant values are the buffer indices passed to
/// `set_vertex_buffer` / `set_fragment_buffer` / `set_vertex_bytes`.
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}
1410
/// Argument slots used to bind quad inputs on the render encoder in
/// `draw_quads`. The discriminant values are the buffer indices passed to
/// the encoder's set-buffer/set-bytes calls.
#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}
1417
/// Argument slots used to bind underline inputs on the render encoder in
/// `draw_underlines`. The discriminant values are the buffer indices passed
/// to the encoder's set-buffer/set-bytes calls.
#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}
1424
/// Argument slots shared by the sprite draw paths (`draw_monochrome_sprites`,
/// `draw_polychrome_sprites`, and `draw_paths_from_intermediate`). Buffer
/// indices 0-3 are bound with set-buffer/set-bytes calls; `AtlasTexture` is a
/// fragment texture slot.
#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}
1433
/// Argument slots used by `draw_surfaces`. Buffer indices 0-3 are bound with
/// set-buffer/set-bytes calls; `YTexture` and `CbCrTexture` are the fragment
/// texture slots for the luminance and chrominance planes of the video frame.
#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}
1443
/// Argument slots used by the path rasterization pass (see the fragment/vertex
/// buffer bindings in the rasterization encoder setup above).
#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    ViewportSize = 1,
}
1449
/// Instance data for one sprite that copies a rasterized path region from the
/// intermediate texture to the render target (see
/// `draw_paths_from_intermediate`). `#[repr(C)]` because instances are
/// memcpy'd directly into the Metal instance buffer.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    // Clipped bounds of the path, in scaled pixels.
    pub bounds: Bounds<ScaledPixels>,
}
1455
/// Per-surface instance record written raw (via `ptr::write`) into the Metal
/// instance buffer by `draw_surfaces`; `#[repr(C)]` so its layout matches
/// what the surface shader reads.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    // Quad bounds of the surface on screen, in scaled pixels.
    pub bounds: Bounds<ScaledPixels>,
    // Clip mask applied to the surface.
    pub content_mask: ContentMask<ScaledPixels>,
}