metal_renderer.rs

use super::metal_atlas::MetalAtlas;
use crate::{
    AtlasTextureId, AtlasTextureKind, AtlasTile, Background, Bounds, ContentMask, DevicePixels,
    MonochromeSprite, PaintSurface, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch,
    Quad, ScaledPixels, Scene, Shadow, Size, Underline, point, px, size,
};
use anyhow::{Context as _, Result};
use block::ConcreteBlock;
use cocoa::{
    base::{NO, YES},
    foundation::{NSSize, NSUInteger},
    quartzcore::AutoresizingMask,
};
use collections::HashMap;
use core_foundation::base::TCFType;
use core_video::{
    metal_texture::CVMetalTextureGetTexture, metal_texture_cache::CVMetalTextureCache,
    pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
};
use foreign_types::{ForeignType, ForeignTypeRef};
use metal::{CAMetalLayer, MTLPixelFormat, MTLResourceOptions, NSRange};
use objc::{self, msg_send, sel, sel_impl};
use parking_lot::Mutex;
use smallvec::SmallVec;
use std::{cell::Cell, ffi::c_void, mem, ptr, sync::Arc};

// Exported to metal
pub(crate) type PointF = crate::Point<f32>;

#[cfg(not(feature = "runtime_shaders"))]
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
#[cfg(feature = "runtime_shaders")]
const SHADERS_SOURCE_FILE: &str = include_str!(concat!(env!("OUT_DIR"), "/stitched_shaders.metal"));
// Use 4x MSAA; all devices support it.
// https://developer.apple.com/documentation/metal/mtldevice/1433355-supportstexturesamplecount
const PATH_SAMPLE_COUNT: u32 = 4;

pub type Context = Arc<Mutex<InstanceBufferPool>>;
pub type Renderer = MetalRenderer;

pub unsafe fn new_renderer(
    context: self::Context,
    _native_window: *mut c_void,
    _native_view: *mut c_void,
    _bounds: crate::Size<f32>,
    _transparent: bool,
) -> Renderer {
    MetalRenderer::new(context)
}

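/// Pool of instance buffers shared across frames. `draw` checks a buffer out,
/// fills it with per-frame instance data, and returns it to the pool from the
/// command buffer's completion handler. `reset` drops all pooled buffers so
/// they are reallocated at the new size on the next `acquire`.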
pub(crate) struct InstanceBufferPool {
    buffer_size: usize,
    buffers: Vec<metal::Buffer>,
}

impl Default for InstanceBufferPool {
    fn default() -> Self {
        Self {
            buffer_size: 2 * 1024 * 1024,
            buffers: Vec::new(),
        }
    }
}

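/// A buffer checked out of the pool, paired with the capacity it was created
/// with so it is only returned to the pool if the pool's size hasn't changed.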
pub(crate) struct InstanceBuffer {
    metal_buffer: metal::Buffer,
    size: usize,
}

impl InstanceBufferPool {
    pub(crate) fn reset(&mut self, buffer_size: usize) {
        self.buffer_size = buffer_size;
        self.buffers.clear();
    }

    pub(crate) fn acquire(&mut self, device: &metal::Device) -> InstanceBuffer {
        let buffer = self.buffers.pop().unwrap_or_else(|| {
            device.new_buffer(
                self.buffer_size as u64,
                MTLResourceOptions::StorageModeManaged,
            )
        });
        InstanceBuffer {
            metal_buffer: buffer,
            size: self.buffer_size,
        }
    }

    pub(crate) fn release(&mut self, buffer: InstanceBuffer) {
        if buffer.size == self.buffer_size {
            self.buffers.push(buffer.metal_buffer)
        }
    }
}

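/// Metal-backed renderer for a single window. Owns the CAMetalLayer, the
/// compiled render pipelines for each primitive kind, the sprite atlas used
/// for monochrome sprites, polychrome sprites, and rasterized paths, and a
/// CoreVideo texture cache for video surfaces.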
pub(crate) struct MetalRenderer {
    device: metal::Device,
    layer: metal::MetalLayer,
    presents_with_transaction: bool,
    command_queue: metal::CommandQueue,
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    path_sprites_pipeline_state: metal::RenderPipelineState,
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    #[allow(clippy::arc_with_non_send_sync)]
    instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>,
    sprite_atlas: Arc<MetalAtlas>,
    core_video_texture_cache: core_video::metal_texture_cache::CVMetalTextureCache,
}

impl MetalRenderer {
    pub fn new(instance_buffer_pool: Arc<Mutex<InstanceBufferPool>>) -> Self {
        // Prefer low-power integrated GPUs on Intel Mac. On Apple
        // Silicon, there is only ever one GPU, so this is equivalent to
        // `metal::Device::system_default()`.
        let mut devices = metal::Device::all();
        devices.sort_by_key(|device| (device.is_removable(), device.is_low_power()));
        let Some(device) = devices.pop() else {
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        layer.set_opaque(false);
        layer.set_maximum_drawable_count(3);
        unsafe {
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }
        #[cfg(feature = "runtime_shaders")]
        let library = device
            .new_library_with_source(&SHADERS_SOURCE_FILE, &metal::CompileOptions::new())
            .expect("error building metal library");
        #[cfg(not(feature = "runtime_shaders"))]
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        fn to_float2_bits(point: PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
            PATH_SAMPLE_COUNT,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone(), PATH_SAMPLE_COUNT));
        let core_video_texture_cache =
            CVMetalTextureCache::new(None, device.clone(), None).unwrap();

        Self {
            device,
            layer,
            presents_with_transaction: false,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instance_buffer_pool,
            sprite_atlas,
            core_video_texture_cache,
        }
    }

    pub fn layer(&self) -> &metal::MetalLayerRef {
        &self.layer
    }

    pub fn layer_ptr(&self) -> *mut CAMetalLayer {
        self.layer.as_ptr()
    }

    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
        &self.sprite_atlas
    }

    pub fn set_presents_with_transaction(&mut self, presents_with_transaction: bool) {
        self.presents_with_transaction = presents_with_transaction;
        self.layer
            .set_presents_with_transaction(presents_with_transaction);
    }

    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let size = NSSize {
            width: size.width.0 as f64,
            height: size.height.0 as f64,
        };
        unsafe {
            let _: () = msg_send![
                self.layer(),
                setDrawableSize: size
            ];
        }
    }

    pub fn update_transparency(&self, _transparent: bool) {
        // todo(mac)?
    }

    pub fn destroy(&self) {
        // nothing to do
    }

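    /// Renders a scene into the layer's next drawable. If the current instance
    /// buffer is too small to hold the scene, the pool is reset with a doubled
    /// buffer size (capped at 256 MiB) and the frame is retried. The instance
    /// buffer is returned to the pool from the command buffer's completion
    /// handler.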
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };

        loop {
            let mut instance_buffer = self.instance_buffer_pool.lock().acquire(&self.device);

            let command_buffer =
                self.draw_primitives(scene, &mut instance_buffer, drawable, viewport_size);

            match command_buffer {
                Ok(command_buffer) => {
                    let instance_buffer_pool = self.instance_buffer_pool.clone();
                    let instance_buffer = Cell::new(Some(instance_buffer));
                    let block = ConcreteBlock::new(move |_| {
                        if let Some(instance_buffer) = instance_buffer.take() {
                            instance_buffer_pool.lock().release(instance_buffer);
                        }
                    });
                    let block = block.copy();
                    command_buffer.add_completed_handler(&block);

                    if self.presents_with_transaction {
                        command_buffer.commit();
                        command_buffer.wait_until_scheduled();
                        drawable.present();
                    } else {
                        command_buffer.present_drawable(drawable);
                        command_buffer.commit();
                    }
                    return;
                }
                Err(err) => {
                    log::error!(
                        "failed to render: {}. retrying with larger instance buffer size",
                        err
                    );
                    let mut instance_buffer_pool = self.instance_buffer_pool.lock();
                    let buffer_size = instance_buffer_pool.buffer_size;
                    if buffer_size >= 256 * 1024 * 1024 {
                        log::error!("instance buffer size grew too large: {}", buffer_size);
                        break;
                    }
                    instance_buffer_pool.reset(buffer_size * 2);
                    log::info!(
                        "increased instance buffer size to {}",
                        instance_buffer_pool.buffer_size
                    );
                }
            }
        }
    }

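    /// Encodes the whole scene into a single command buffer: paths are
    /// rasterized into atlas tiles first, then each primitive batch is drawn
    /// into the drawable. Returns an error if the instance buffer runs out of
    /// space so the caller can retry with a larger buffer.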
    fn draw_primitives(
        &mut self,
        scene: &Scene,
        instance_buffer: &mut InstanceBuffer,
        drawable: &metal::MetalDrawableRef,
        viewport_size: Size<DevicePixels>,
    ) -> Result<metal::CommandBuffer> {
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        let path_tiles = self
            .rasterize_paths(
                scene.paths(),
                instance_buffer,
                &mut instance_offset,
                command_buffer,
            )
            .with_context(|| format!("rasterizing {} paths", scene.paths().len()))?;

        // Create initial render pass
        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let mut command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        let mut needs_new_encoder = false;

        // Helper to create a continuation render encoder
        let create_continuation_encoder = || {
            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            color_attachment.set_texture(Some(drawable.texture()));
            color_attachment.set_load_action(metal::MTLLoadAction::Load);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);

            let encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            encoder.set_viewport(metal::MTLViewport {
                originX: 0.0,
                originY: 0.0,
                width: i32::from(viewport_size.width) as f64,
                height: i32::from(viewport_size.height) as f64,
                znear: 0.0,
                zfar: 1.0,
            });
            encoder
        };

        for batch in scene.batches() {
            // Create a new encoder if needed
            if needs_new_encoder {
                command_encoder = create_continuation_encoder();
                needs_new_encoder = false;
            }

            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    &command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => self.draw_quads(
                    quads,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    &command_encoder,
                ),
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    &command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    &command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    &command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    &command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    instance_buffer,
                    &mut instance_offset,
                    viewport_size,
                    &command_encoder,
                ),
                #[cfg(target_os = "macos")]
                PrimitiveBatch::MetalViews(metal_views) => {
                    // End current render pass
                    command_encoder.end_encoding();

                    // Process each MetalView
                    for metal_view in metal_views {
                        // Create a render encoder for the callback
                        let render_pass_descriptor = metal::RenderPassDescriptor::new();
                        let color_attachment = render_pass_descriptor
                            .color_attachments()
                            .object_at(0)
                            .unwrap();

                        color_attachment.set_texture(Some(drawable.texture()));
                        color_attachment.set_load_action(metal::MTLLoadAction::Load);
                        color_attachment.set_store_action(metal::MTLStoreAction::Store);

                        let callback_encoder =
                            command_buffer.new_render_command_encoder(render_pass_descriptor);
                        callback_encoder.set_viewport(metal::MTLViewport {
                            originX: 0.0,
                            originY: 0.0,
                            width: i32::from(viewport_size.width) as f64,
                            height: i32::from(viewport_size.height) as f64,
                            znear: 0.0,
                            zfar: 1.0,
                        });

                        // Invoke the Metal rendering callback
                        let scale_factor = self.layer.contents_scale() as f32;
                        // Convert bounds from ScaledPixels to Pixels
                        let bounds = Bounds {
                            origin: point(
                                px(metal_view.bounds.origin.x.0 / scale_factor),
                                px(metal_view.bounds.origin.y.0 / scale_factor),
                            ),
                            size: size(
                                px(metal_view.bounds.size.width.0 / scale_factor),
                                px(metal_view.bounds.size.height.0 / scale_factor),
                            ),
                        };

                        (metal_view.render_callback)(
                            &callback_encoder,
                            drawable.texture(),
                            bounds,
                            scale_factor,
                        );

                        callback_encoder.end_encoding();
                    }

                    // Mark that we'll need a new encoder for subsequent primitives
                    needs_new_encoder = true;
                    true
                }
            };

            if !ok {
                command_encoder.end_encoding();
                anyhow::bail!(
                    "scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                );
            }
        }

        // End the encoder if we haven't already
        if !needs_new_encoder {
            command_encoder.end_encoding();
        }

        instance_buffer.metal_buffer.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });
        Ok(command_buffer.to_owned())
    }

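    /// Rasterizes each path into a tile of the path atlas, resolving through
    /// an MSAA texture when one is available, and returns the tile assigned to
    /// each path id. Returns `None` if a tile allocation fails or the instance
    /// buffer can't hold the path vertices.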
    fn rasterize_paths(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path)?;
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(instance_offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *instance_offset + vertices_bytes_len;
            if next_offset > instance_buffer.size {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            let msaa_texture = self.sprite_atlas.msaa_texture(texture_id);

            if let Some(msaa_texture) = msaa_texture {
                color_attachment.set_texture(Some(&msaa_texture));
                color_attachment.set_resolve_texture(Some(&texture));
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_store_action(metal::MTLStoreAction::MultisampleResolve);
            } else {
                color_attachment.set_texture(Some(&texture));
                color_attachment.set_load_action(metal::MTLLoadAction::Clear);
                color_attachment.set_store_action(metal::MTLStoreAction::Store);
            }
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));

            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            let buffer_contents = unsafe {
                (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
            };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *instance_offset = next_offset;
        }

        Some(tiles)
    }

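    /// The draw_* helpers below all follow the same pattern: copy the batch's
    /// instances into the shared instance buffer at a 256-byte-aligned offset,
    /// bind that slice plus any per-batch uniforms and textures, and issue a
    /// draw of the unit quad. They return `false` when the instance buffer is
    /// too small, which is reported as an error by `draw_primitives`.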
    fn draw_shadows(
        &self,
        shadows: &[Shadow],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if shadows.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            ShadowInputIndex::Shadows as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            ShadowInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let shadow_bytes_len = mem::size_of_val(shadows);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + shadow_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                shadows.as_ptr() as *const u8,
                buffer_contents,
                shadow_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            shadows.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_quads(
        &self,
        quads: &[Quad],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if quads.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            QuadInputIndex::Quads as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            QuadInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let quad_bytes_len = mem::size_of_val(quads);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + quad_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            quads.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_paths(
        &self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            if sprites.is_empty() {
                break;
            } else {
                align_offset(instance_offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&instance_buffer.metal_buffer),
                    *instance_offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *instance_offset + sprite_bytes_len;
                if next_offset > instance_buffer.size {
                    return false;
                }

                let buffer_contents = unsafe {
                    (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset)
                };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *instance_offset = next_offset;
                sprites.clear();
            }
        }
        true
    }

    fn draw_underlines(
        &self,
        underlines: &[Underline],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if underlines.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_buffer(
            UnderlineInputIndex::Underlines as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );

        command_encoder.set_vertex_bytes(
            UnderlineInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let underline_bytes_len = mem::size_of_val(underlines);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + underline_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                underlines.as_ptr() as *const u8,
                buffer_contents,
                underline_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            underlines.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_monochrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[MonochromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_polychrome_sprites(
        &self,
        texture_id: AtlasTextureId,
        sprites: &[PolychromeSprite],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if sprites.is_empty() {
            return true;
        }
        align_offset(instance_offset);

        let texture = self.sprite_atlas.metal_texture(texture_id);
        let texture_size = size(
            DevicePixels(texture.width() as i32),
            DevicePixels(texture.height() as i32),
        );
        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::AtlasTextureSize as u64,
            mem::size_of_val(&texture_size) as u64,
            &texture_size as *const Size<DevicePixels> as *const _,
        );
        command_encoder.set_fragment_buffer(
            SpriteInputIndex::Sprites as u64,
            Some(&instance_buffer.metal_buffer),
            *instance_offset as u64,
        );
        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

        let sprite_bytes_len = mem::size_of_val(sprites);
        let buffer_contents =
            unsafe { (instance_buffer.metal_buffer.contents() as *mut u8).add(*instance_offset) };

        let next_offset = *instance_offset + sprite_bytes_len;
        if next_offset > instance_buffer.size {
            return false;
        }

        unsafe {
            ptr::copy_nonoverlapping(
                sprites.as_ptr() as *const u8,
                buffer_contents,
                sprite_bytes_len,
            );
        }

        command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            sprites.len() as u64,
        );
        *instance_offset = next_offset;
        true
    }

    fn draw_surfaces(
        &mut self,
        surfaces: &[PaintSurface],
        instance_buffer: &mut InstanceBuffer,
        instance_offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.get_width() as i32),
                DevicePixels::from(surface.image_buffer.get_height() as i32),
            );

            assert_eq!(
                surface.image_buffer.get_pixel_format(),
                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            let y_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::R8Unorm,
                    surface.image_buffer.get_width_of_plane(0),
                    surface.image_buffer.get_height_of_plane(0),
                    0,
                )
                .unwrap();
            let cb_cr_texture = self
                .core_video_texture_cache
                .create_texture_from_image(
                    surface.image_buffer.as_concrete_TypeRef(),
                    None,
                    MTLPixelFormat::RG8Unorm,
                    surface.image_buffer.get_width_of_plane(1),
                    surface.image_buffer.get_height_of_plane(1),
                    1,
                )
                .unwrap();

            align_offset(instance_offset);
            let next_offset = *instance_offset + mem::size_of::<SurfaceBounds>();
            if next_offset > instance_buffer.size {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&instance_buffer.metal_buffer),
                *instance_offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(SurfaceInputIndex::YTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(y_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });
            command_encoder.set_fragment_texture(SurfaceInputIndex::CbCrTexture as u64, unsafe {
                let texture = CVMetalTextureGetTexture(cb_cr_texture.as_concrete_TypeRef());
                Some(metal::TextureRef::from_ptr(texture as *mut _))
            });

            unsafe {
                let buffer_contents = (instance_buffer.metal_buffer.contents() as *mut u8)
                    .add(*instance_offset)
                    as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *instance_offset = next_offset;
        }
        true
    }

    // Note: draw_metal_views is no longer needed as we handle MetalViews
    // directly in draw_primitives with proper render pass management
}

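/// Builds a render pipeline for the given vertex/fragment function pair with
/// alpha blending enabled on a single color attachment of the given pixel
/// format. Panics if the shader functions can't be found or the pipeline
/// fails to build.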
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

fn build_path_rasterization_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
    path_sample_count: u32,
) -> metal::RenderPipelineState {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .expect("error locating vertex function");
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .expect("error locating fragment function");

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    if path_sample_count > 1 {
        descriptor.set_raster_sample_count(path_sample_count as _);
        descriptor.set_alpha_to_coverage_enabled(true);
    }
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);

    device
        .new_render_pipeline_state(&descriptor)
        .expect("could not create render pipeline state")
}

// Align to multiples of 256 to make Metal happy.
fn align_offset(offset: &mut usize) {
    *offset = (*offset).div_ceil(256) * 256;
}

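// Argument indices used with set_vertex_buffer / set_fragment_buffer /
// set_fragment_texture above. These are expected to mirror the corresponding
// [[buffer(n)]] and [[texture(n)]] attributes in the Metal shader source.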
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}

#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}

#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}

#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}

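/// Per-instance data for compositing a rasterized path tile from the path
/// atlas into the drawable.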
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    pub bounds: Bounds<ScaledPixels>,
    pub color: Background,
    pub tile: AtlasTile,
}

#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    pub bounds: Bounds<ScaledPixels>,
    pub content_mask: ContentMask<ScaledPixels>,
}