metal_renderer.rs

use super::metal_atlas::MetalAtlas;
use crate::{
    AtlasTextureId, Background, Bounds, ContentMask, DevicePixels, MonochromeSprite, PaintSurface,
    Path, Point, PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size,
    Surface, Underline, point, size,
};
use anyhow::Result;
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::{NSSize, NSUInteger},
    quartzcore::AutoresizingMask,
};

use core_foundation::base::TCFType;
use core_video::{
    metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
    pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
};
use foreign_types::{ForeignType, ForeignTypeRef};
use metal::{
    CAMetalLayer, CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange,
    RenderPassColorAttachmentDescriptorRef,
};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;

use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to the Metal shaders.
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA; all Metal devices support this sample count.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;

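/// # Safety
///
/// The raw window and view pointers are unused on macOS; this function appears
/// to be `unsafe` only so its signature matches the other platform renderer
/// constructors (an assumption, based on the ignored `_native_*` parameters).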
pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    _transparent: bool,
) -> Renderer {
    MetalRenderer::new(context)
}

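/// A pool of instance buffers shared across frames. Buffers are handed out by
/// `acquire`, filled with per-frame instance data, and returned via `release`
/// once the GPU has finished reading from them.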
pub(crate) struct InstanceBufferPool {
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}

impl Default for InstanceBufferPool {
    fn default() -> Self {
        Self {
            buffer_size: 2 * 1024 * 1024,
            buffers: Vec::new(),
        }
    }
}

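/// A buffer checked out of the pool, tagged with the pool's buffer size at the
/// time of acquisition so stale buffers can be dropped after a resize.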
pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    size: usize,
}

impl InstanceBufferPool {
    pub(crate) fn reset(&mut self, buffer_size: usize) {
        self.buffer_size = buffer_size;
        self.buffers.clear();
    }

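    /// Pops a pooled buffer, or allocates a new one of the current pool size.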
    pub(crate) fn acquire(
        &mut self,
        device: &metal::Device,
        unified_memory: bool,
    ) -> InstanceBuffer {
        let buffer = self.buffers.pop().unwrap_or_else(|| {
            let options = if unified_memory {
                MTLResourceOptions::StorageModeShared
                    // These buffers are only ever written by the CPU, so they
                    // benefit from the write-combined CPU cache mode.
                    // https://developer.apple.com/documentation/metal/mtlresourceoptions/cpucachemodewritecombined
                    | MTLResourceOptions::CPUCacheModeWriteCombined
            } else {
                MTLResourceOptions::StorageModeManaged
            };

            device.new_buffer(self.buffer_size as u64, options)
        });
        InstanceBuffer {
            metal_buffer: buffer,
            size: self.buffer_size,
        }
    }

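    /// Returns a buffer to the pool. Buffers acquired before a `reset` carry a
    /// stale size and are dropped instead of being reused.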
    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
        if buffer.size == self.buffer_size {
            self.buffers.push(buffer.metal_buffer)
        }
    }
}

pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    unified_memory: bool,
    presents_with_transaction: bool,
    command_queue: CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: core_video::metal_texture_cache::CVMetalTextureCache,
    path_intermediate_texture: Option<metal::Texture>,
    path_intermediate_msaa_texture: Option<metal::Texture>,
    path_sample_count: u32,
}

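/// CPU-side layout of a path-rasterization vertex. `#[repr(C)]` keeps the
/// field order stable so it matches the corresponding struct in the shaders.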
#[repr(C)]
pub struct PathRasterizationVertex {
    pub xy_position: Point<ScaledPixels>,
    pub st_position: Point<f32>,
    pub color: Background,
    pub bounds: Bounds<ScaledPixels>,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>) -> Self {
        // Prefer low-power integrated GPUs on Intel Macs. On Apple
        // Silicon there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let device = if let Some(d) = metal::Device::all()
            .into_iter()
            .min_by_key(|d| (d.is_removable(), !d.is_low_power()))
        {
            d
        } else {
            // For some reason `all()` can return an empty list; see
            // https://github.com/zed-industries/zed/issues/37689. In that
            // case, fall back to the system default device.
            log::error!(
                "Unable to enumerate Metal devices; attempting to use system default device"
            );
            metal::Device::system_default().unwrap_or_else(|| {
                log::error!("unable to access a compatible graphics device");
                std::process::exit(1);
            })
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(false);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

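        // Packs a point into the bit layout of a Metal `float2`: x occupies
        // the low 32 bits and y the high 32 bits, matching the in-memory
        // field order on a little-endian target.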
        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        // Shared memory can be used only if CPU and GPU share the same memory space.
        // https://developer.apple.com/documentation/metal/setting-resource-storage-modes
        let unified_memory = device.has_unified_memory();

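        // A unit quad expressed as two triangles; instanced draws scale and
        // translate it into each primitive's bounds.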
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            if unified_memory {
                MTLResourceOptions::StorageModeShared
                    | MTLResourceOptions::CPUCacheModeWriteCombined
            } else {
                MTLResourceOptions::StorageModeManaged
            },
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::BGRA8Unorm,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_path_sprite_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));
        let core_video_texture_cache =
            CVMetalTextureCache::new(None, device.clone(), None).unwrap();

        Self {
            device,
            layer,
            presents_with_transaction: false,
            unified_memory,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
            path_intermediate_texture: None,
            path_intermediate_msaa_texture: None,
            path_sample_count: PATH_SAMPLE_COUNT,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let size = NSSize {
            width: size.width.0 as f64,
            height: size.height.0 as f64,
        };
        unsafe {
            let _: () = msg_send![
                self.layer(),
                setDrawableSize: size
            ];
        }
        let device_pixels_size = Size {
            width: DevicePixels(size.width as i32),
            height: DevicePixels(size.height as i32),
        };
        self.update_path_intermediate_textures(device_pixels_size);
    }

    fn update_path_intermediate_textures(&mut self, size: Size<DevicePixels>) {
        // The size can occasionally be zero here, most likely before the first
        // layout pass on window creation. Creating a zero-sized texture aborts
        // with SIGABRT, so skip texture creation in that case.
        // https://github.com/zed-industries/zed/issues/36229
        if size.width.0 <= 0 || size.height.0 <= 0 {
            self.path_intermediate_texture = None;
            self.path_intermediate_msaa_texture = None;
            return;
        }

        let texture_descriptor = metal::TextureDescriptor::new();
        texture_descriptor.set_width(size.width.0 as u64);
        texture_descriptor.set_height(size.height.0 as u64);
        texture_descriptor.set_pixel_format(metal::MTLPixelFormat::BGRA8Unorm);
        texture_descriptor.set_storage_mode(metal::MTLStorageMode::Private);
        texture_descriptor
            .set_usage(metal::MTLTextureUsage::RenderTarget | metal::MTLTextureUsage::ShaderRead);
        self.path_intermediate_texture = Some(self.device.new_texture(&texture_descriptor));

        if self.path_sample_count > 1 {
            // Rendering to the MSAA texture happens in a single pass, so we
            // can use memoryless storage on Apple Silicon.
            // https://developer.apple.com/documentation/metal/choosing-a-resource-storage-mode-for-apple-gpus
            let storage_mode = if self.unified_memory {
                metal::MTLStorageMode::Memoryless
            } else {
                metal::MTLStorageMode::Private
            };

            let msaa_descriptor = texture_descriptor;
            msaa_descriptor.set_texture_type(metal::MTLTextureType::D2Multisample);
            msaa_descriptor.set_storage_mode(storage_mode);
            msaa_descriptor.set_sample_count(self.path_sample_count as _);
            self.path_intermediate_msaa_texture = Some(self.device.new_texture(&msaa_descriptor));
        } else {
            self.path_intermediate_msaa_texture = None;
        }
    }

    pub fn update_transparency(&self, _transparent: bool) {
        // todo(mac)?
    }

    pub fn destroy(&self) {
        // nothing to do
    }

    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

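        // Encode the scene, retrying with a doubled instance buffer whenever
        // the current one is too small to hold all primitive instances.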
        loop {
            let mut instance_buffer = self
                .instance_buffer_pool
                .lock()
                .acquire(&self.device, self.unified_memory);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
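                    // Return the instance buffer to the pool only once the GPU
                    // has finished executing the command buffer.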
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

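                    // With presents-with-transaction, commit and wait until
                    // the work is scheduled, then present from the CPU so the
                    // present joins the current Core Animation transaction;
                    // otherwise attach the present to the command buffer.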
                    if self.presents_with_transaction {
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }

    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        let mut instance_offset = 0;

        let mut command_encoder = new_command_encoder(
            command_buffer,
            drawable,
            viewport_size,
            |color_attachment| {
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
            },
        );

        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => {
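                    // Paths are first rasterized into an offscreen texture and
                    // then composited onto the drawable, so the main encoder
                    // must be ended and recreated around the offscreen pass.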
                    command_encoder.end_encoding();

                    let did_draw = self.draw_paths_to_intermediate(
                        paths,
                        instance_buffer,
                        &mut instance_offset,
                        viewport_size,
                        command_buffer,
                    );

                    command_encoder = new_command_encoder(
                        command_buffer,
                        drawable,
                        viewport_size,
                        |color_attachment| {
                            color_attachment.set_load_action(metal::MTLLoadAction::Load);
                        },
                    );

                    if did_draw {
                        self.draw_paths_from_intermediate(
                            paths,
                            instance_buffer,
                            &mut instance_offset,
                            viewport_size,
                            command_encoder,
                        )
                    } else {
                        false
                    }
                }
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };
            if !ok {
                command_encoder.end_encoding();
                anyhow::bail!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
            }
        }

        command_encoder.end_encoding();

        if !self.unified_memory {
            // Sync the instance buffer to the GPU
            instance_buffer.metal_buffer.did_modify_range(NSRange {
                location: 0,
                length: instance_offset as NSUInteger,
            });
        }

        Ok(command_buffer.to_owned())
    }

    fn draw_paths_to_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_buffer: &metal::CommandBufferRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }
        let Some(intermediate_texture) = &self.path_intermediate_texture else {
            return false;
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 0.));

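        // With MSAA enabled, render into the multisample texture and resolve
        // into the intermediate texture; otherwise render to it directly.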
        if let Some(msaa_texture) = &self.path_intermediate_msaa_texture {
            color_attachment.set_texture(Some(msaa_texture));
            color_attachment.set_resolve_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
        } else {
            color_attachment.set_texture(Some(intermediate_texture));
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
        }

        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
        command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);

        align_offset(instance_offset);
        let mut vertices = Vec::new();
        for path in paths {
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds: path.bounds.intersect(&path.content_mask.bounds),
            }));
        }
        let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
        let next_offset = *instance_offset + vertices_bytes_len;
        if next_offset > instance_buffer.size {
            command_encoder.end_encoding();
            return false;
        }
        command_encoder.set_vertex_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            PathRasterizationInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            PathRasterizationInputIndex::Vertices as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                vertices.as_ptr() as *const u8,
                buffer_contents,
                vertices_bytes_len,
            );
        }
        command_encoder.draw_primitives(
            metal::MTLPrimitiveType::Triangle,
            0,
            vertices.len() as u64,
        );
        *instance_offset = next_offset;

        command_encoder.end_encoding();
        true
    }

    fn draw_shadows(
        &self,
        shadows: &[Shadow],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &self,
        quads: &[Quad],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        let Some(first_path) = paths.first() else {
            return true;
        };

        let Some(ref intermediate_texture) = self.path_intermediate_texture else {
            return false;
        };

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        command_encoder.set_fragment_texture(
            SpriteInputIndex::AtlasTexture as u64,
            Some(intermediate_texture),
        );

        // When compositing paths from the intermediate texture onto the
        // drawable, each pixel must be copied exactly once; otherwise
        // translucent paths would be blended into the drawable twice.
        //
        // If all paths in this batch have the same draw order, their bounds
        // are disjoint, so we can copy each path's bounds individually. If the
        // batch combines different draw orders, we instead perform a single
        // copy of the minimal rect spanning all of the paths.
        let sprites;
        if paths.last().unwrap().order == first_path.order {
            sprites = paths
                .iter()
                .map(|path| PathSprite {
                    bounds: path.clipped_bounds(),
                })
                .collect();
        } else {
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            sprites = vec![PathSprite { bounds }];
        }

        align_offset(instance_offset);
        let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };
        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;

        true
    }

    fn draw_underlines(
        &self,
        underlines: &[Underline],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_surfaces(
        &mut self,
        surfaces: &[PaintSurface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

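        // Each surface is a biplanar YCbCr pixel buffer: plane 0 holds luma
        // (Y) and plane 1 holds interleaved chroma (CbCr). Each plane is
        // wrapped in a Metal texture via the Core Video texture cache.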
        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.get_width() as i32),
                DevicePixels::from(surface.image_buffer.get_height() as i32),
            );

            assert_eq!(
                surface.image_buffer.get_pixel_format(),
                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.get_width_of_plane(0),
                    surface.image_buffer.get_height_of_plane(0),
                    0,
                )
                .unwrap();
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.get_width_of_plane(1),
                    surface.image_buffer.get_height_of_plane(1),
                    1,
                )
                .unwrap();

            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<Surface>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });
            command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });

            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }
}

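/// Creates a render command encoder targeting the drawable's texture and sets
/// the viewport. The caller configures the color attachment's load action:
/// `Clear` for the first pass over a frame, `Load` to preserve the output of
/// earlier passes.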
fn new_command_encoder<'a>(
    command_buffer: &'a metal::CommandBufferRef,
    drawable: &'a metal::MetalDrawableRef,
    viewport_size: Size<DevicePixels>,
    configure_color_attachment: impl Fn(&RenderPassColorAttachmentDescriptorRef),
) -> &'a metal::RenderCommandEncoderRef {
    let render_pass_descriptor = metal::RenderPassDescriptor::new();
    let color_attachment = render_pass_descriptor
        .color_attachments()
        .object_at(0)
        .unwrap();
    color_attachment.set_texture(Some(drawable.texture()));
    color_attachment.set_store_action(metal::MTLStoreAction::Store);
    configure_color_attachment(color_attachment);

    let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
    command_encoder.set_viewport(metal::MTLViewport {
        originX: 0.0,
        originY: 0.0,
        width: i32::from(viewport_size.width) as f64,
        height: i32::from(viewport_size.height) as f64,
        znear: 0.0,
        zfar: 1.0,
    });
    command_encoder
}

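/// Standard pipeline for most primitives. The blend state uses straight
/// (non-premultiplied) alpha for color: source RGB is scaled by source alpha
/// at the blend stage.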
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

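/// Pipeline for compositing the intermediate path texture onto the drawable.
/// The source RGB blend factor is `One` rather than `SourceAlpha`, which
/// treats the sampled color as already premultiplied (the offscreen path pass
/// blends onto transparent black with the same factors).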
fn build_path_sprite_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

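/// Pipeline for rasterizing paths into the intermediate texture. It takes the
/// raster sample count so the pipeline matches the MSAA texture it renders to.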
fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
    path_sample_count: u32,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    if path_sample_count > 1 {
        descriptor.set_raster_sample_count(path_sample_count as _);
        descriptor.set_alpha_to_coverage_enabled(false);
    }
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Align to multiples of 256 to make Metal happy.
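// For example, an offset of 1 rounds up to 256 and 300 rounds up to 512.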
fn align_offset(offset: &mut usize) {
    *offset = (*offset).div_ceil(256) * 256;
}

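// Buffer and texture argument indices shared with the shader entry points.
// The discriminants of the `*InputIndex` enums below must stay in sync with
// the Metal shader source.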
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    ViewportSize = 1,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}