metal_renderer.rs

   1use crate::{
   2    point, size, AtlasTextureId, AtlasTextureKind, AtlasTile, Bounds, ContentMask, DevicePixels,
   3    Hsla, MetalAtlas, MonochromeSprite, Path, PathId, PathVertex, PolychromeSprite, PrimitiveBatch,
   4    Quad, ScaledPixels, Scene, Shadow, Size, Surface, Underline,
   5};
   6use cocoa::{
   7    base::{NO, YES},
   8    foundation::NSUInteger,
   9    quartzcore::AutoresizingMask,
  10};
  11use collections::HashMap;
  12use core_foundation::base::TCFType;
  13use foreign_types::ForeignType;
  14use media::core_video::CVMetalTextureCache;
  15use metal::{CommandQueue, MTLPixelFormat, MTLResourceOptions, NSRange};
  16use objc::{self, msg_send, sel, sel_impl};
  17use smallvec::SmallVec;
  18use std::{ffi::c_void, mem, ptr, sync::Arc};
  19
/// Compiled Metal shader library, produced by the build script and embedded
/// into the binary at compile time.
const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
/// Capacity of the shared per-frame instance buffer that all primitive batches
/// are streamed into.
const INSTANCE_BUFFER_SIZE: usize = 32 * 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value (maybe even we could adjust dynamically...)
  22
/// macOS renderer that draws a `Scene` into a `CAMetalLayer` using Metal.
pub(crate) struct MetalRenderer {
    // The CAMetalLayer this renderer presents drawables into.
    layer: metal::MetalLayer,
    command_queue: CommandQueue,
    // Offscreen pass that rasterizes path coverage into R16Float atlas tiles.
    paths_rasterization_pipeline_state: metal::RenderPipelineState,
    // Samples the rasterized path tiles into the main BGRA8 drawable.
    path_sprites_pipeline_state: metal::RenderPipelineState,
    // One pipeline per primitive kind, all targeting the BGRA8 drawable.
    shadows_pipeline_state: metal::RenderPipelineState,
    quads_pipeline_state: metal::RenderPipelineState,
    underlines_pipeline_state: metal::RenderPipelineState,
    monochrome_sprites_pipeline_state: metal::RenderPipelineState,
    polychrome_sprites_pipeline_state: metal::RenderPipelineState,
    surfaces_pipeline_state: metal::RenderPipelineState,
    // Six vertices forming a unit quad (two triangles), shared by all
    // instanced quad draws.
    unit_vertices: metal::Buffer,
    // Single shared buffer that per-frame instance data is streamed into;
    // see `INSTANCE_BUFFER_SIZE`.
    instances: metal::Buffer,
    sprite_atlas: Arc<MetalAtlas>,
    // Cache used to wrap CVImageBuffer planes as Metal textures for surfaces.
    core_video_texture_cache: CVMetalTextureCache,
}
  39
  40impl MetalRenderer {
    /// Creates a renderer backed by the system-default Metal device.
    ///
    /// Configures the CAMetalLayer (pixel format, transactional presentation,
    /// opacity, autoresizing), compiles all render pipelines from the embedded
    /// shader library, and allocates the shared unit-quad and instance
    /// buffers. Exits the process if no compatible graphics device exists.
    pub fn new(is_opaque: bool) -> Self {
        let device: metal::Device = if let Some(device) = metal::Device::system_default() {
            device
        } else {
            // Without a Metal device we cannot render anything at all.
            log::error!("unable to access a compatible graphics device");
            std::process::exit(1);
        };

        let layer = metal::MetalLayer::new();
        layer.set_device(&device);
        layer.set_pixel_format(MTLPixelFormat::BGRA8Unorm);
        // Present inside CoreAnimation transactions so resizes stay in sync.
        layer.set_presents_with_transaction(true);
        layer.set_opaque(is_opaque);
        unsafe {
            // Properties not exposed by the metal-rs bindings; set via
            // Objective-C message sends on the raw layer.
            let _: () = msg_send![&*layer, setAllowsNextDrawableTimeout: NO];
            let _: () = msg_send![&*layer, setNeedsDisplayOnBoundsChange: YES];
            let _: () = msg_send![
                &*layer,
                setAutoresizingMask: AutoresizingMask::WIDTH_SIZABLE
                    | AutoresizingMask::HEIGHT_SIZABLE
            ];
        }

        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .expect("error building metal library");

        // Packs a point into the bit layout of a Metal `float2` (x in the low
        // 32 bits, y in the high 32 bits).
        fn to_float2_bits(point: crate::PointF) -> u64 {
            let mut output = point.y.to_bits() as u64;
            output <<= 32;
            output |= point.x.to_bits() as u64;
            output
        }

        // Two triangles covering the unit square; instanced draws scale these
        // into each primitive's bounds in the vertex shaders.
        let unit_vertices = [
            to_float2_bits(point(0., 0.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(0., 1.)),
            to_float2_bits(point(1., 0.)),
            to_float2_bits(point(1., 1.)),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            mem::size_of_val(&unit_vertices) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        // Path rasterization renders coverage into R16Float atlas textures;
        // every other pipeline targets the BGRA8 drawable.
        let paths_rasterization_pipeline_state = build_path_rasterization_pipeline_state(
            &device,
            &library,
            "paths_rasterization",
            "path_rasterization_vertex",
            "path_rasterization_fragment",
            MTLPixelFormat::R16Float,
        );
        let path_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "path_sprites",
            "path_sprite_vertex",
            "path_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let shadows_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "shadows",
            "shadow_vertex",
            "shadow_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let quads_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "quads",
            "quad_vertex",
            "quad_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let underlines_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "underlines",
            "underline_vertex",
            "underline_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let monochrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "monochrome_sprites",
            "monochrome_sprite_vertex",
            "monochrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let polychrome_sprites_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "polychrome_sprites",
            "polychrome_sprite_vertex",
            "polychrome_sprite_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );
        let surfaces_pipeline_state = build_pipeline_state(
            &device,
            &library,
            "surfaces",
            "surface_vertex",
            "surface_fragment",
            MTLPixelFormat::BGRA8Unorm,
        );

        let command_queue = device.new_command_queue();
        let sprite_atlas = Arc::new(MetalAtlas::new(device.clone()));

        Self {
            layer,
            command_queue,
            paths_rasterization_pipeline_state,
            path_sprites_pipeline_state,
            shadows_pipeline_state,
            quads_pipeline_state,
            underlines_pipeline_state,
            monochrome_sprites_pipeline_state,
            polychrome_sprites_pipeline_state,
            surfaces_pipeline_state,
            unit_vertices,
            instances,
            sprite_atlas,
            core_video_texture_cache: unsafe { CVMetalTextureCache::new(device.as_ptr()).unwrap() },
        }
    }
 178
 179    pub fn layer(&self) -> &metal::MetalLayerRef {
 180        &self.layer
 181    }
 182
 183    pub fn sprite_atlas(&self) -> &Arc<MetalAtlas> {
 184        &self.sprite_atlas
 185    }
 186
    /// Renders one complete `Scene` into the layer's next drawable.
    ///
    /// All instance data for the frame is streamed into the single shared
    /// `instances` buffer; `instance_offset` tracks the running write
    /// position across path rasterization and every primitive batch.
    ///
    /// Panics if the scene's instance data exceeds `INSTANCE_BUFFER_SIZE`.
    pub fn draw(&mut self, scene: &Scene) {
        let layer = self.layer.clone();
        let viewport_size = layer.drawable_size();
        // Round the layer's floating-point drawable size up to whole device pixels.
        let viewport_size: Size<DevicePixels> = size(
            (viewport_size.width.ceil() as i32).into(),
            (viewport_size.height.ceil() as i32).into(),
        );
        let drawable = if let Some(drawable) = layer.next_drawable() {
            drawable
        } else {
            // No drawable available; log and skip this frame entirely.
            log::error!(
                "failed to retrieve next drawable, drawable size: {:?}",
                viewport_size
            );
            return;
        };
        let command_queue = self.command_queue.clone();
        let command_buffer = command_queue.new_command_buffer();
        let mut instance_offset = 0;

        // Rasterize all paths into atlas tiles in their own render passes
        // first; the main pass then samples those tiles as sprites.
        let Some(path_tiles) =
            self.rasterize_paths(scene.paths(), &mut instance_offset, command_buffer)
        else {
            panic!("failed to rasterize {} paths", scene.paths().len());
        };

        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();

        color_attachment.set_texture(Some(drawable.texture()));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        // Opaque layers clear to opaque black; transparent layers clear fully.
        let alpha = if self.layer.is_opaque() { 1. } else { 0. };
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., alpha));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: i32::from(viewport_size.width) as f64,
            height: i32::from(viewport_size.height) as f64,
            znear: 0.0,
            zfar: 1.0,
        });
        // Encode batches in scene order. Each helper returns false when the
        // shared instance buffer has run out of space.
        for batch in scene.batches() {
            let ok = match batch {
                PrimitiveBatch::Shadows(shadows) => self.draw_shadows(
                    shadows,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Quads(quads) => {
                    self.draw_quads(quads, &mut instance_offset, viewport_size, command_encoder)
                }
                PrimitiveBatch::Paths(paths) => self.draw_paths(
                    paths,
                    &path_tiles,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Underlines(underlines) => self.draw_underlines(
                    underlines,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_monochrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => self.draw_polychrome_sprites(
                    texture_id,
                    sprites,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
                PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(
                    surfaces,
                    &mut instance_offset,
                    viewport_size,
                    command_encoder,
                ),
            };

            if !ok {
                panic!("scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces",
                    scene.paths.len(),
                    scene.shadows.len(),
                    scene.quads.len(),
                    scene.underlines.len(),
                    scene.monochrome_sprites.len(),
                    scene.polychrome_sprites.len(),
                    scene.surfaces.len(),
                )
            }
        }

        command_encoder.end_encoding();

        // The instance buffer uses managed storage: flush the CPU-written
        // range so the GPU sees this frame's data.
        self.instances.did_modify_range(NSRange {
            location: 0,
            length: instance_offset as NSUInteger,
        });

        command_buffer.commit();
        // Path tiles only live for this frame; recycle their atlas textures.
        self.sprite_atlas.clear_textures(AtlasTextureKind::Path);

        command_buffer.wait_until_completed();
        drawable.present();
    }
 312
    /// Rasterizes every path into an atlas tile, one render pass per atlas
    /// texture, and returns the tile assigned to each path id.
    ///
    /// Path vertices are translated from scene space into their tile's
    /// position within the atlas texture, written into the shared instance
    /// buffer at `*offset` (which is advanced past each batch), and drawn
    /// with the path-rasterization pipeline. Returns `None` when the
    /// instance buffer runs out of space.
    fn rasterize_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        offset: &mut usize,
        command_buffer: &metal::CommandBufferRef,
    ) -> Option<HashMap<PathId, AtlasTile>> {
        let mut tiles = HashMap::default();
        let mut vertices_by_texture_id = HashMap::default();
        for path in paths {
            // Only the visible (content-masked) portion of the path needs a tile.
            let clipped_bounds = path.bounds.intersect(&path.content_mask.bounds);

            let tile = self
                .sprite_atlas
                .allocate(clipped_bounds.size.map(Into::into), AtlasTextureKind::Path);
            // Group vertices by destination texture so each texture gets a
            // single render pass below.
            vertices_by_texture_id
                .entry(tile.texture_id)
                .or_insert(Vec::new())
                .extend(path.vertices.iter().map(|vertex| PathVertex {
                    // Translate from scene coordinates into the tile's
                    // location inside the atlas texture.
                    xy_position: vertex.xy_position - clipped_bounds.origin
                        + tile.bounds.origin.map(Into::into),
                    st_position: vertex.st_position,
                    content_mask: ContentMask {
                        bounds: tile.bounds.map(Into::into),
                    },
                }));
            tiles.insert(path.id, tile);
        }

        for (texture_id, vertices) in vertices_by_texture_id {
            align_offset(offset);
            let vertices_bytes_len = mem::size_of_val(vertices.as_slice());
            let next_offset = *offset + vertices_bytes_len;
            if next_offset > INSTANCE_BUFFER_SIZE {
                return None;
            }

            let render_pass_descriptor = metal::RenderPassDescriptor::new();
            let color_attachment = render_pass_descriptor
                .color_attachments()
                .object_at(0)
                .unwrap();

            let texture = self.sprite_atlas.metal_texture(texture_id);
            color_attachment.set_texture(Some(&texture));
            color_attachment.set_load_action(metal::MTLLoadAction::Clear);
            color_attachment.set_store_action(metal::MTLStoreAction::Store);
            color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
            let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);
            command_encoder.set_render_pipeline_state(&self.paths_rasterization_pipeline_state);
            command_encoder.set_vertex_buffer(
                PathRasterizationInputIndex::Vertices as u64,
                Some(&self.instances),
                *offset as u64,
            );
            let texture_size = Size {
                width: DevicePixels::from(texture.width()),
                height: DevicePixels::from(texture.height()),
            };
            command_encoder.set_vertex_bytes(
                PathRasterizationInputIndex::AtlasTextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );

            // Copy the vertex data into the instance buffer. This happens
            // after encoding the draw, which is fine: the GPU only reads the
            // buffer once the command buffer is committed.
            let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
            unsafe {
                ptr::copy_nonoverlapping(
                    vertices.as_ptr() as *const u8,
                    buffer_contents,
                    vertices_bytes_len,
                );
            }

            command_encoder.draw_primitives(
                metal::MTLPrimitiveType::Triangle,
                0,
                vertices.len() as u64,
            );
            command_encoder.end_encoding();
            *offset = next_offset;
        }

        Some(tiles)
    }
 397
 398    fn draw_shadows(
 399        &mut self,
 400        shadows: &[Shadow],
 401        offset: &mut usize,
 402        viewport_size: Size<DevicePixels>,
 403        command_encoder: &metal::RenderCommandEncoderRef,
 404    ) -> bool {
 405        if shadows.is_empty() {
 406            return true;
 407        }
 408        align_offset(offset);
 409
 410        command_encoder.set_render_pipeline_state(&self.shadows_pipeline_state);
 411        command_encoder.set_vertex_buffer(
 412            ShadowInputIndex::Vertices as u64,
 413            Some(&self.unit_vertices),
 414            0,
 415        );
 416        command_encoder.set_vertex_buffer(
 417            ShadowInputIndex::Shadows as u64,
 418            Some(&self.instances),
 419            *offset as u64,
 420        );
 421        command_encoder.set_fragment_buffer(
 422            ShadowInputIndex::Shadows as u64,
 423            Some(&self.instances),
 424            *offset as u64,
 425        );
 426
 427        command_encoder.set_vertex_bytes(
 428            ShadowInputIndex::ViewportSize as u64,
 429            mem::size_of_val(&viewport_size) as u64,
 430            &viewport_size as *const Size<DevicePixels> as *const _,
 431        );
 432
 433        let shadow_bytes_len = mem::size_of_val(shadows);
 434        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
 435
 436        let next_offset = *offset + shadow_bytes_len;
 437        if next_offset > INSTANCE_BUFFER_SIZE {
 438            return false;
 439        }
 440
 441        unsafe {
 442            ptr::copy_nonoverlapping(
 443                shadows.as_ptr() as *const u8,
 444                buffer_contents,
 445                shadow_bytes_len,
 446            );
 447        }
 448
 449        command_encoder.draw_primitives_instanced(
 450            metal::MTLPrimitiveType::Triangle,
 451            0,
 452            6,
 453            shadows.len() as u64,
 454        );
 455        *offset = next_offset;
 456        true
 457    }
 458
 459    fn draw_quads(
 460        &mut self,
 461        quads: &[Quad],
 462        offset: &mut usize,
 463        viewport_size: Size<DevicePixels>,
 464        command_encoder: &metal::RenderCommandEncoderRef,
 465    ) -> bool {
 466        if quads.is_empty() {
 467            return true;
 468        }
 469        align_offset(offset);
 470
 471        command_encoder.set_render_pipeline_state(&self.quads_pipeline_state);
 472        command_encoder.set_vertex_buffer(
 473            QuadInputIndex::Vertices as u64,
 474            Some(&self.unit_vertices),
 475            0,
 476        );
 477        command_encoder.set_vertex_buffer(
 478            QuadInputIndex::Quads as u64,
 479            Some(&self.instances),
 480            *offset as u64,
 481        );
 482        command_encoder.set_fragment_buffer(
 483            QuadInputIndex::Quads as u64,
 484            Some(&self.instances),
 485            *offset as u64,
 486        );
 487
 488        command_encoder.set_vertex_bytes(
 489            QuadInputIndex::ViewportSize as u64,
 490            mem::size_of_val(&viewport_size) as u64,
 491            &viewport_size as *const Size<DevicePixels> as *const _,
 492        );
 493
 494        let quad_bytes_len = mem::size_of_val(quads);
 495        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
 496
 497        let next_offset = *offset + quad_bytes_len;
 498        if next_offset > INSTANCE_BUFFER_SIZE {
 499            return false;
 500        }
 501
 502        unsafe {
 503            ptr::copy_nonoverlapping(quads.as_ptr() as *const u8, buffer_contents, quad_bytes_len);
 504        }
 505
 506        command_encoder.draw_primitives_instanced(
 507            metal::MTLPrimitiveType::Triangle,
 508            0,
 509            6,
 510            quads.len() as u64,
 511        );
 512        *offset = next_offset;
 513        true
 514    }
 515
    /// Draws previously rasterized paths by sampling their atlas tiles as
    /// sprites into the main drawable.
    ///
    /// Consecutive paths whose tiles live in the same atlas texture are
    /// accumulated into one sprite batch and flushed with a single instanced
    /// draw whenever the texture changes (or the input is exhausted).
    /// Returns `false` when a batch does not fit in `INSTANCE_BUFFER_SIZE`.
    fn draw_paths(
        &mut self,
        paths: &[Path<ScaledPixels>],
        tiles_by_path_id: &HashMap<PathId, AtlasTile>,
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        if paths.is_empty() {
            return true;
        }

        command_encoder.set_render_pipeline_state(&self.path_sprites_pipeline_state);
        command_encoder.set_vertex_buffer(
            SpriteInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SpriteInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        let mut prev_texture_id = None;
        let mut sprites = SmallVec::<[_; 1]>::new();
        // Every path must have a tile from `rasterize_paths`; a missing entry
        // would be a bug, hence the unwrap.
        let mut paths_and_tiles = paths
            .iter()
            .map(|path| (path, tiles_by_path_id.get(&path.id).unwrap()))
            .peekable();

        loop {
            // Accumulate while the next path shares the current batch's
            // texture (or no batch has started yet).
            if let Some((path, tile)) = paths_and_tiles.peek() {
                if prev_texture_id.map_or(true, |texture_id| texture_id == tile.texture_id) {
                    prev_texture_id = Some(tile.texture_id);
                    let origin = path.bounds.intersect(&path.content_mask.bounds).origin;
                    sprites.push(PathSprite {
                        bounds: Bounds {
                            origin: origin.map(|p| p.floor()),
                            size: tile.bounds.size.map(Into::into),
                        },
                        color: path.color,
                        tile: (*tile).clone(),
                    });
                    paths_and_tiles.next();
                    continue;
                }
            }

            // Texture changed or input exhausted: flush the accumulated
            // batch, or stop once there is nothing left to flush.
            if sprites.is_empty() {
                break;
            } else {
                align_offset(offset);
                let texture_id = prev_texture_id.take().unwrap();
                let texture: metal::Texture = self.sprite_atlas.metal_texture(texture_id);
                let texture_size = size(
                    DevicePixels(texture.width() as i32),
                    DevicePixels(texture.height() as i32),
                );

                command_encoder.set_vertex_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&self.instances),
                    *offset as u64,
                );
                command_encoder.set_vertex_bytes(
                    SpriteInputIndex::AtlasTextureSize as u64,
                    mem::size_of_val(&texture_size) as u64,
                    &texture_size as *const Size<DevicePixels> as *const _,
                );
                command_encoder.set_fragment_buffer(
                    SpriteInputIndex::Sprites as u64,
                    Some(&self.instances),
                    *offset as u64,
                );
                command_encoder
                    .set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));

                let sprite_bytes_len = mem::size_of_val(sprites.as_slice());
                let next_offset = *offset + sprite_bytes_len;
                if next_offset > INSTANCE_BUFFER_SIZE {
                    return false;
                }

                let buffer_contents =
                    unsafe { (self.instances.contents() as *mut u8).add(*offset) };

                unsafe {
                    ptr::copy_nonoverlapping(
                        sprites.as_ptr() as *const u8,
                        buffer_contents,
                        sprite_bytes_len,
                    );
                }

                command_encoder.draw_primitives_instanced(
                    metal::MTLPrimitiveType::Triangle,
                    0,
                    6,
                    sprites.len() as u64,
                );
                *offset = next_offset;
                sprites.clear();
            }
        }
        true
    }
 623
 624    fn draw_underlines(
 625        &mut self,
 626        underlines: &[Underline],
 627        offset: &mut usize,
 628        viewport_size: Size<DevicePixels>,
 629        command_encoder: &metal::RenderCommandEncoderRef,
 630    ) -> bool {
 631        if underlines.is_empty() {
 632            return true;
 633        }
 634        align_offset(offset);
 635
 636        command_encoder.set_render_pipeline_state(&self.underlines_pipeline_state);
 637        command_encoder.set_vertex_buffer(
 638            UnderlineInputIndex::Vertices as u64,
 639            Some(&self.unit_vertices),
 640            0,
 641        );
 642        command_encoder.set_vertex_buffer(
 643            UnderlineInputIndex::Underlines as u64,
 644            Some(&self.instances),
 645            *offset as u64,
 646        );
 647        command_encoder.set_fragment_buffer(
 648            UnderlineInputIndex::Underlines as u64,
 649            Some(&self.instances),
 650            *offset as u64,
 651        );
 652
 653        command_encoder.set_vertex_bytes(
 654            UnderlineInputIndex::ViewportSize as u64,
 655            mem::size_of_val(&viewport_size) as u64,
 656            &viewport_size as *const Size<DevicePixels> as *const _,
 657        );
 658
 659        let underline_bytes_len = mem::size_of_val(underlines);
 660        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
 661
 662        let next_offset = *offset + underline_bytes_len;
 663        if next_offset > INSTANCE_BUFFER_SIZE {
 664            return false;
 665        }
 666
 667        unsafe {
 668            ptr::copy_nonoverlapping(
 669                underlines.as_ptr() as *const u8,
 670                buffer_contents,
 671                underline_bytes_len,
 672            );
 673        }
 674
 675        command_encoder.draw_primitives_instanced(
 676            metal::MTLPrimitiveType::Triangle,
 677            0,
 678            6,
 679            underlines.len() as u64,
 680        );
 681        *offset = next_offset;
 682        true
 683    }
 684
 685    fn draw_monochrome_sprites(
 686        &mut self,
 687        texture_id: AtlasTextureId,
 688        sprites: &[MonochromeSprite],
 689        offset: &mut usize,
 690        viewport_size: Size<DevicePixels>,
 691        command_encoder: &metal::RenderCommandEncoderRef,
 692    ) -> bool {
 693        if sprites.is_empty() {
 694            return true;
 695        }
 696        align_offset(offset);
 697
 698        let texture = self.sprite_atlas.metal_texture(texture_id);
 699        let texture_size = size(
 700            DevicePixels(texture.width() as i32),
 701            DevicePixels(texture.height() as i32),
 702        );
 703        command_encoder.set_render_pipeline_state(&self.monochrome_sprites_pipeline_state);
 704        command_encoder.set_vertex_buffer(
 705            SpriteInputIndex::Vertices as u64,
 706            Some(&self.unit_vertices),
 707            0,
 708        );
 709        command_encoder.set_vertex_buffer(
 710            SpriteInputIndex::Sprites as u64,
 711            Some(&self.instances),
 712            *offset as u64,
 713        );
 714        command_encoder.set_vertex_bytes(
 715            SpriteInputIndex::ViewportSize as u64,
 716            mem::size_of_val(&viewport_size) as u64,
 717            &viewport_size as *const Size<DevicePixels> as *const _,
 718        );
 719        command_encoder.set_vertex_bytes(
 720            SpriteInputIndex::AtlasTextureSize as u64,
 721            mem::size_of_val(&texture_size) as u64,
 722            &texture_size as *const Size<DevicePixels> as *const _,
 723        );
 724        command_encoder.set_fragment_buffer(
 725            SpriteInputIndex::Sprites as u64,
 726            Some(&self.instances),
 727            *offset as u64,
 728        );
 729        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
 730
 731        let sprite_bytes_len = mem::size_of_val(sprites);
 732        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
 733
 734        let next_offset = *offset + sprite_bytes_len;
 735        if next_offset > INSTANCE_BUFFER_SIZE {
 736            return false;
 737        }
 738
 739        unsafe {
 740            ptr::copy_nonoverlapping(
 741                sprites.as_ptr() as *const u8,
 742                buffer_contents,
 743                sprite_bytes_len,
 744            );
 745        }
 746
 747        command_encoder.draw_primitives_instanced(
 748            metal::MTLPrimitiveType::Triangle,
 749            0,
 750            6,
 751            sprites.len() as u64,
 752        );
 753        *offset = next_offset;
 754        true
 755    }
 756
 757    fn draw_polychrome_sprites(
 758        &mut self,
 759        texture_id: AtlasTextureId,
 760        sprites: &[PolychromeSprite],
 761        offset: &mut usize,
 762        viewport_size: Size<DevicePixels>,
 763        command_encoder: &metal::RenderCommandEncoderRef,
 764    ) -> bool {
 765        if sprites.is_empty() {
 766            return true;
 767        }
 768        align_offset(offset);
 769
 770        let texture = self.sprite_atlas.metal_texture(texture_id);
 771        let texture_size = size(
 772            DevicePixels(texture.width() as i32),
 773            DevicePixels(texture.height() as i32),
 774        );
 775        command_encoder.set_render_pipeline_state(&self.polychrome_sprites_pipeline_state);
 776        command_encoder.set_vertex_buffer(
 777            SpriteInputIndex::Vertices as u64,
 778            Some(&self.unit_vertices),
 779            0,
 780        );
 781        command_encoder.set_vertex_buffer(
 782            SpriteInputIndex::Sprites as u64,
 783            Some(&self.instances),
 784            *offset as u64,
 785        );
 786        command_encoder.set_vertex_bytes(
 787            SpriteInputIndex::ViewportSize as u64,
 788            mem::size_of_val(&viewport_size) as u64,
 789            &viewport_size as *const Size<DevicePixels> as *const _,
 790        );
 791        command_encoder.set_vertex_bytes(
 792            SpriteInputIndex::AtlasTextureSize as u64,
 793            mem::size_of_val(&texture_size) as u64,
 794            &texture_size as *const Size<DevicePixels> as *const _,
 795        );
 796        command_encoder.set_fragment_buffer(
 797            SpriteInputIndex::Sprites as u64,
 798            Some(&self.instances),
 799            *offset as u64,
 800        );
 801        command_encoder.set_fragment_texture(SpriteInputIndex::AtlasTexture as u64, Some(&texture));
 802
 803        let sprite_bytes_len = mem::size_of_val(sprites);
 804        let buffer_contents = unsafe { (self.instances.contents() as *mut u8).add(*offset) };
 805
 806        let next_offset = *offset + sprite_bytes_len;
 807        if next_offset > INSTANCE_BUFFER_SIZE {
 808            return false;
 809        }
 810
 811        unsafe {
 812            ptr::copy_nonoverlapping(
 813                sprites.as_ptr() as *const u8,
 814                buffer_contents,
 815                sprite_bytes_len,
 816            );
 817        }
 818
 819        command_encoder.draw_primitives_instanced(
 820            metal::MTLPrimitiveType::Triangle,
 821            0,
 822            6,
 823            sprites.len() as u64,
 824        );
 825        *offset = next_offset;
 826        true
 827    }
 828
    /// Encodes one draw call per video surface. Each surface carries a
    /// CoreVideo image buffer in biplanar YCbCr format; the luma (Y) and
    /// chroma (CbCr) planes are wrapped as Metal textures through the
    /// `CVMetalTextureCache` and sampled by the surface fragment shader.
    ///
    /// Returns `false` when the shared instance buffer runs out of space
    /// (presumably so the caller can flush and retry — the caller is not
    /// visible here; confirm). Note that on `false`, surfaces earlier in the
    /// slice have already been encoded and `*offset` reflects them.
    fn draw_surfaces(
        &mut self,
        surfaces: &[Surface],
        offset: &mut usize,
        viewport_size: Size<DevicePixels>,
        command_encoder: &metal::RenderCommandEncoderRef,
    ) -> bool {
        command_encoder.set_render_pipeline_state(&self.surfaces_pipeline_state);
        // The unit quad vertices and the viewport size are shared by every
        // surface, so bind them once up front.
        command_encoder.set_vertex_buffer(
            SurfaceInputIndex::Vertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        command_encoder.set_vertex_bytes(
            SurfaceInputIndex::ViewportSize as u64,
            mem::size_of_val(&viewport_size) as u64,
            &viewport_size as *const Size<DevicePixels> as *const _,
        );

        for surface in surfaces {
            let texture_size = size(
                DevicePixels::from(surface.image_buffer.width() as i32),
                DevicePixels::from(surface.image_buffer.height() as i32),
            );

            // Only 4:2:0 biplanar full-range YCbCr is supported; the two
            // texture formats below (R8 for plane 0, RG8 for plane 1) assume
            // exactly this plane layout.
            assert_eq!(
                surface.image_buffer.pixel_format_type(),
                media::core_video::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            );

            // Plane 0: luma, a single 8-bit channel.
            // SAFETY: `image_buffer` is a live CVImageBuffer for the duration
            // of this call, and plane 0 exists per the format assert above.
            let y_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::R8Unorm,
                        surface.image_buffer.plane_width(0),
                        surface.image_buffer.plane_height(0),
                        0,
                    )
                    .unwrap()
            };
            // Plane 1: interleaved chroma (Cb, Cr), two 8-bit channels.
            // SAFETY: same invariants as above; plane 1 exists per the assert.
            let cb_cr_texture = unsafe {
                self.core_video_texture_cache
                    .create_texture_from_image(
                        surface.image_buffer.as_concrete_TypeRef(),
                        ptr::null(),
                        MTLPixelFormat::RG8Unorm,
                        surface.image_buffer.plane_width(1),
                        surface.image_buffer.plane_height(1),
                        1,
                    )
                    .unwrap()
            };

            // Metal requires 256-byte-aligned buffer-binding offsets; bail out
            // before binding or writing anything if this instance won't fit.
            align_offset(offset);
            let next_offset = *offset + mem::size_of::<Surface>();
            if next_offset > INSTANCE_BUFFER_SIZE {
                return false;
            }

            command_encoder.set_vertex_buffer(
                SurfaceInputIndex::Surfaces as u64,
                Some(&self.instances),
                *offset as u64,
            );
            command_encoder.set_vertex_bytes(
                SurfaceInputIndex::TextureSize as u64,
                mem::size_of_val(&texture_size) as u64,
                &texture_size as *const Size<DevicePixels> as *const _,
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::YTexture as u64,
                Some(y_texture.as_texture_ref()),
            );
            command_encoder.set_fragment_texture(
                SurfaceInputIndex::CbCrTexture as u64,
                Some(cb_cr_texture.as_texture_ref()),
            );

            // Only the bounds and content mask travel through the instance
            // buffer (as a `SurfaceBounds`); the pixel data is carried by the
            // textures bound above.
            // SAFETY: the bounds check above guarantees the write stays within
            // the instance buffer, and `align_offset` satisfied Metal's
            // alignment requirement for the binding offset used here.
            unsafe {
                let buffer_contents =
                    (self.instances.contents() as *mut u8).add(*offset) as *mut SurfaceBounds;
                ptr::write(
                    buffer_contents,
                    SurfaceBounds {
                        bounds: surface.bounds,
                        content_mask: surface.content_mask.clone(),
                    },
                );
            }

            command_encoder.draw_primitives(metal::MTLPrimitiveType::Triangle, 0, 6);
            *offset = next_offset;
        }
        true
    }
 926}
 927
 928fn build_pipeline_state(
 929    device: &metal::DeviceRef,
 930    library: &metal::LibraryRef,
 931    label: &str,
 932    vertex_fn_name: &str,
 933    fragment_fn_name: &str,
 934    pixel_format: metal::MTLPixelFormat,
 935) -> metal::RenderPipelineState {
 936    let vertex_fn = library
 937        .get_function(vertex_fn_name, None)
 938        .expect("error locating vertex function");
 939    let fragment_fn = library
 940        .get_function(fragment_fn_name, None)
 941        .expect("error locating fragment function");
 942
 943    let descriptor = metal::RenderPipelineDescriptor::new();
 944    descriptor.set_label(label);
 945    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
 946    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
 947    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
 948    color_attachment.set_pixel_format(pixel_format);
 949    color_attachment.set_blending_enabled(true);
 950    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
 951    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
 952    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
 953    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
 954    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
 955    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
 956
 957    device
 958        .new_render_pipeline_state(&descriptor)
 959        .expect("could not create render pipeline state")
 960}
 961
 962fn build_path_rasterization_pipeline_state(
 963    device: &metal::DeviceRef,
 964    library: &metal::LibraryRef,
 965    label: &str,
 966    vertex_fn_name: &str,
 967    fragment_fn_name: &str,
 968    pixel_format: metal::MTLPixelFormat,
 969) -> metal::RenderPipelineState {
 970    let vertex_fn = library
 971        .get_function(vertex_fn_name, None)
 972        .expect("error locating vertex function");
 973    let fragment_fn = library
 974        .get_function(fragment_fn_name, None)
 975        .expect("error locating fragment function");
 976
 977    let descriptor = metal::RenderPipelineDescriptor::new();
 978    descriptor.set_label(label);
 979    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
 980    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
 981    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
 982    color_attachment.set_pixel_format(pixel_format);
 983    color_attachment.set_blending_enabled(true);
 984    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
 985    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
 986    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::One);
 987    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::One);
 988    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::One);
 989    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::One);
 990
 991    device
 992        .new_render_pipeline_state(&descriptor)
 993        .expect("could not create render pipeline state")
 994}
 995
 996// Align to multiples of 256 make Metal happy.
 997fn align_offset(offset: &mut usize) {
 998    *offset = ((*offset + 255) / 256) * 256;
 999}
1000
/// Argument-table indices for the shadow shader's buffers.
/// Presumably these values mirror constants in the shader source compiled
/// into `SHADERS_METALLIB` — confirm against shaders.metal before changing.
#[repr(C)]
enum ShadowInputIndex {
    Vertices = 0,
    Shadows = 1,
    ViewportSize = 2,
}
1007
/// Argument-table indices for the quad shader's buffers.
/// Presumably these values mirror constants in the shader source compiled
/// into `SHADERS_METALLIB` — confirm against shaders.metal before changing.
#[repr(C)]
enum QuadInputIndex {
    Vertices = 0,
    Quads = 1,
    ViewportSize = 2,
}
1014
/// Argument-table indices for the underline shader's buffers.
/// Presumably these values mirror constants in the shader source compiled
/// into `SHADERS_METALLIB` — confirm against shaders.metal before changing.
#[repr(C)]
enum UnderlineInputIndex {
    Vertices = 0,
    Underlines = 1,
    ViewportSize = 2,
}
1021
/// Argument-table indices used when encoding sprite draws: buffer slots for
/// `set_vertex_buffer`/`set_vertex_bytes`/`set_fragment_buffer` and the
/// texture slot for `set_fragment_texture` (the atlas texture). Shared by the
/// monochrome and polychrome sprite pipelines; values must stay in sync with
/// the shader source compiled into `SHADERS_METALLIB`.
#[repr(C)]
enum SpriteInputIndex {
    Vertices = 0,
    Sprites = 1,
    ViewportSize = 2,
    AtlasTextureSize = 3,
    AtlasTexture = 4,
}
1030
/// Argument-table indices used by `draw_surfaces`: buffer slots for the unit
/// vertices, per-surface instance data, viewport and texture sizes, plus the
/// two fragment texture slots for the luma (Y) and chroma (CbCr) planes.
/// Values must stay in sync with the shader source compiled into
/// `SHADERS_METALLIB`.
#[repr(C)]
enum SurfaceInputIndex {
    Vertices = 0,
    Surfaces = 1,
    ViewportSize = 2,
    TextureSize = 3,
    YTexture = 4,
    CbCrTexture = 5,
}
1040
/// Argument-table indices for the path-rasterization shader's inputs.
/// Presumably these values mirror constants in the shader source compiled
/// into `SHADERS_METALLIB` — confirm against shaders.metal before changing.
#[repr(C)]
enum PathRasterizationInputIndex {
    Vertices = 0,
    AtlasTextureSize = 1,
}
1046
/// Per-instance data for compositing a rasterized path from its atlas tile
/// onto the screen.
///
/// `#[repr(C)]` because instances of this struct are read by the GPU; the
/// field order and layout must match the shader-side definition — do not
/// reorder fields.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct PathSprite {
    // Screen-space bounds the path sprite is drawn into.
    pub bounds: Bounds<ScaledPixels>,
    // Fill color applied to the path's coverage.
    pub color: Hsla,
    // Atlas tile holding the rasterized path coverage data.
    pub tile: AtlasTile,
}
1054
/// Per-instance data written into the instance buffer by `draw_surfaces`:
/// just the placement and clip of a video surface (the pixel data is bound
/// separately as Y/CbCr textures).
///
/// `#[repr(C)]` because instances of this struct are read by the GPU; the
/// field order and layout must match the shader-side definition — do not
/// reorder fields.
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
pub struct SurfaceBounds {
    // Screen-space bounds the surface is drawn into.
    pub bounds: Bounds<ScaledPixels>,
    // Clipping mask applied when compositing the surface.
    pub content_mask: ContentMask<ScaledPixels>,
}