// wgpu_renderer.rs

   1use crate::{WgpuAtlas, WgpuContext};
   2use bytemuck::{Pod, Zeroable};
   3use gpui::{
   4    AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
   5    PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
   6    Underline, get_gamma_correction_ratios,
   7};
   8use log::warn;
   9use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
  10use std::num::NonZeroU64;
  11use std::sync::Arc;
  12
/// Frame-wide uniform parameters, uploaded at the start of the globals buffer.
///
/// `#[repr(C)]` + `Pod` so the struct can be written directly into a wgpu
/// uniform buffer; the layout must match the corresponding uniform declared
/// in `shaders.wgsl`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    /// Viewport size of the render target, as two f32s.
    viewport_size: [f32; 2],
    /// Non-zero when the surface composites with premultiplied alpha.
    premultiplied_alpha: u32,
    /// Explicit padding to keep the struct a multiple of 16 bytes.
    pad: u32,
}
  20
/// GPU-uploadable flattening of `Bounds<ScaledPixels>`: origin and size as
/// plain f32 pairs (see the `From` impl below).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    /// Top-left corner (x, y).
    origin: [f32; 2],
    /// Extent (width, height).
    size: [f32; 2],
}
  27
  28impl From<Bounds<ScaledPixels>> for PodBounds {
  29    fn from(bounds: Bounds<ScaledPixels>) -> Self {
  30        Self {
  31            origin: [bounds.origin.x.0, bounds.origin.y.0],
  32            size: [bounds.size.width.0, bounds.size.height.0],
  33        }
  34    }
  35}
  36
/// Per-surface uniform data: where to draw the surface and how to clip it.
/// Bound at binding 0 of the `surfaces` bind-group layout.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    /// Destination bounds of the surface quad.
    bounds: PodBounds,
    /// Clip rectangle applied when compositing the surface.
    content_mask: PodBounds,
}
  43
/// Gamma-correction and contrast-enhancement parameters for text rendering,
/// bound at binding 1 of the globals bind-group layout (fragment stage only).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    /// Ratios produced by `get_gamma_correction_ratios` (see gpui).
    gamma_ratios: [f32; 4],
    /// Contrast boost applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    /// Contrast boost applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    /// Padding to a 32-byte size for uniform-buffer layout.
    _pad: [f32; 2],
}
  52
/// Screen-space bounds of one rendered path sprite.
// NOTE(review): not `Pod`/`Zeroable`, so presumably converted field-by-field
// before upload — confirm against the draw code (not visible in this chunk).
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
  58
/// One vertex of the path-rasterization pass.
// NOTE(review): `#[repr(C)]` suggests a shader-matched layout, but the type
// is not `Pod` — verify how these vertices are uploaded in the draw code.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    /// Vertex position in scaled pixels.
    xy_position: Point<ScaledPixels>,
    /// Parametric (s, t) coordinate used by the fill computation.
    st_position: Point<f32>,
    /// Fill to apply to the path.
    color: Background,
    /// Bounds of the whole path this vertex belongs to.
    bounds: Bounds<ScaledPixels>,
}
  67
/// Initial configuration for the window surface passed to [`WgpuRenderer::new`].
pub struct WgpuSurfaceConfig {
    /// Requested drawable size in device pixels (clamped to the adapter's
    /// maximum texture dimension at creation time).
    pub size: Size<DevicePixels>,
    /// Whether the window requests a transparent (premultiplied) swapchain.
    pub transparent: bool,
}
  72
/// One render pipeline per primitive batch kind (see `create_pipelines`).
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    /// Renders path geometry into the intermediate texture.
    path_rasterization: wgpu::RenderPipeline,
    /// Composites the rasterized paths onto the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    /// Only present when the adapter supports dual-source blending.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
  85
/// Bind-group layouts shared across the render pipelines
/// (see `create_bind_group_layouts` for the exact bindings).
struct WgpuBindGroupLayouts {
    /// Binding 0: `GlobalParams` uniform; binding 1: `GammaParams` uniform.
    globals: wgpu::BindGroupLayout,
    /// Binding 0: read-only storage buffer of instance data.
    instances: wgpu::BindGroupLayout,
    /// Instance storage buffer plus a sampled 2D texture and sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    /// `SurfaceParams` uniform plus two sampled textures and a sampler.
    surfaces: wgpu::BindGroupLayout,
}
  92
/// wgpu-backed renderer owning the swapchain and GPU resources for one window.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    /// Shared sprite/glyph texture atlas.
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    /// Single uniform buffer packing three regions: `GlobalParams` at offset
    /// 0, a path-pass `GlobalParams` at `path_globals_offset`, and
    /// `GammaParams` at `gamma_offset` (each aligned to the device's minimum
    /// uniform-buffer offset alignment).
    globals_buffer: wgpu::Buffer,
    path_globals_offset: u64,
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    /// Storage buffer holding per-primitive instance data.
    instance_buffer: wgpu::Buffer,
    /// Current byte capacity of `instance_buffer`.
    instance_buffer_capacity: u64,
    storage_buffer_alignment: u64,
    // Intermediate path textures are created lazily (None until first use)
    // and invalidated on resize; see `ensure_intermediate_textures`.
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    /// Whether the adapter supports dual-source blending (enables the
    /// subpixel-sprite pipeline).
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    /// Alpha mode used when the window is transparent.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    /// Alpha mode used for opaque windows.
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    /// Cached `max_texture_dimension_2d`; surface sizes are clamped to this.
    max_texture_size: u32,
}
 121
 122impl WgpuRenderer {
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// Reuses the instance/device in `gpu_context` when one is present;
    /// otherwise creates a fresh [`WgpuContext`] and stores it there.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    pub fn new<W: HasWindowHandle + HasDisplayHandle>(
        gpu_context: &mut Option<WgpuContext>,
        window: &W,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
        let display_handle = window
            .display_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            raw_display_handle: display_handle.as_raw(),
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        let instance = gpu_context
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(WgpuContext::instance);

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        // Reuse a compatible existing context, or create one and stash it in
        // `gpu_context` so later windows share the same device/queue.
        let context = match gpu_context {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => gpu_context.insert(WgpuContext::new(instance, &surface)?),
        };

        // Format selection: prefer the common non-sRGB 8-bit formats, then any
        // non-sRGB format the surface offers, then whatever is first.
        let surface_caps = surface.get_capabilities(&context.adapter);
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first supported alpha mode from an ordered preference list,
        // falling back to whatever the surface supports.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested drawable size to what the device can texture.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                 Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            // A zero-sized surface configuration is invalid; force at least 1px.
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Pack three uniform regions into one buffer, each starting at an
        // offset aligned to the device's minimum uniform-buffer alignment:
        // [GlobalParams | path GlobalParams | GammaParams].
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Binding 0: GlobalParams at offset 0; binding 1: GammaParams.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        // Same layout, but GlobalParams read from the path-specific region.
        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        Ok(Self {
            device,
            queue,
            surface,
            surface_config,
            pipelines,
            bind_group_layouts,
            atlas,
            atlas_sampler,
            globals_buffer,
            path_globals_offset,
            gamma_offset,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            storage_buffer_alignment,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
        })
    }
 370
    /// Creates the four bind-group layouts shared by all render pipelines.
    ///
    /// - `globals`: GlobalParams (binding 0, vertex+fragment) and GammaParams
    ///   (binding 1, fragment only) uniforms.
    /// - `instances`: a single read-only storage buffer of instance data.
    /// - `instances_with_texture`: instance storage plus a filterable 2D
    ///   texture and sampler.
    /// - `surfaces`: SurfaceParams uniform plus two sampled textures and a
    ///   sampler (used by the surface-composite pipeline).
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Shared shape for the read-only instance storage buffer entries below.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                // Two textures — presumably separate planes of a video
                // surface (e.g. Y + UV); confirm against fs_surface in
                // shaders.wgsl.
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
 494
    /// Compiles `shaders.wgsl` once and builds every render pipeline.
    ///
    /// All pipelines draw triangle strips with no vertex buffers (vertex data
    /// comes from the instance storage buffers), except `path_rasterization`
    /// which uses a triangle list and renders at `path_sample_count` samples.
    /// The subpixel-sprite pipeline is only built when the adapter supports
    /// dual-source blending.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });

        // Match the blend mode to how the compositor interprets our alpha.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Shared pipeline builder: two bind groups (globals + per-batch data),
        // no vertex buffers, no depth/stencil.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Renders into the intermediate texture (always premultiplied),
        // possibly multisampled.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
        );

        // Color: premultiplied "over" (One / OneMinusSrcAlpha).
        // Alpha: additive (One / One).
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let subpixel_sprites = if dual_source_blending {
            // Dual-source blending: the second fragment output (Src1) acts as
            // a per-channel alpha mask. Alpha writes are masked off
            // (ColorWrites::COLOR), leaving destination alpha untouched.
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
 724
    /// Creates the single-sample intermediate texture (plus default view) for
    /// path rendering. It is both a render attachment and a sampled texture
    /// (see the usage flags), so rasterized paths can later be read back by
    /// the `paths` pipeline.
    fn create_path_intermediate(
        device: &wgpu::Device,
        format: wgpu::TextureFormat,
        width: u32,
        height: u32,
    ) -> (wgpu::Texture, wgpu::TextureView) {
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("path_intermediate"),
            size: wgpu::Extent3d {
                // Zero-sized textures are invalid; force at least 1px.
                width: width.max(1),
                height: height.max(1),
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format,
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
            view_formats: &[],
        });
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        (texture, view)
    }
 748
 749    fn create_msaa_if_needed(
 750        device: &wgpu::Device,
 751        format: wgpu::TextureFormat,
 752        width: u32,
 753        height: u32,
 754        sample_count: u32,
 755    ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
 756        if sample_count <= 1 {
 757            return None;
 758        }
 759        let texture = device.create_texture(&wgpu::TextureDescriptor {
 760            label: Some("path_msaa"),
 761            size: wgpu::Extent3d {
 762                width: width.max(1),
 763                height: height.max(1),
 764                depth_or_array_layers: 1,
 765            },
 766            mip_level_count: 1,
 767            sample_count,
 768            dimension: wgpu::TextureDimension::D2,
 769            format,
 770            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
 771            view_formats: &[],
 772        });
 773        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
 774        Some((texture, view))
 775    }
 776
 777    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
 778        let width = size.width.0 as u32;
 779        let height = size.height.0 as u32;
 780
 781        if width != self.surface_config.width || height != self.surface_config.height {
 782            let clamped_width = width.min(self.max_texture_size);
 783            let clamped_height = height.min(self.max_texture_size);
 784
 785            if clamped_width != width || clamped_height != height {
 786                warn!(
 787                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
 788                     Clamping to ({}, {}). Window content may not fill the entire window.",
 789                    width, height, self.max_texture_size, clamped_width, clamped_height
 790                );
 791            }
 792
 793            // Wait for any in-flight GPU work to complete before destroying textures
 794            if let Err(e) = self.device.poll(wgpu::PollType::Wait {
 795                submission_index: None,
 796                timeout: None,
 797            }) {
 798                warn!("Failed to poll device during resize: {e:?}");
 799            }
 800
 801            // Destroy old textures before allocating new ones to avoid GPU memory spikes
 802            if let Some(ref texture) = self.path_intermediate_texture {
 803                texture.destroy();
 804            }
 805            if let Some(ref texture) = self.path_msaa_texture {
 806                texture.destroy();
 807            }
 808
 809            self.surface_config.width = clamped_width.max(1);
 810            self.surface_config.height = clamped_height.max(1);
 811            self.surface.configure(&self.device, &self.surface_config);
 812
 813            // Invalidate intermediate textures - they will be lazily recreated
 814            // in draw() after we confirm the surface is healthy. This avoids
 815            // panics when the device/surface is in an invalid state during resize.
 816            self.path_intermediate_texture = None;
 817            self.path_intermediate_view = None;
 818            self.path_msaa_texture = None;
 819            self.path_msaa_view = None;
 820        }
 821    }
 822
 823    fn ensure_intermediate_textures(&mut self) {
 824        if self.path_intermediate_texture.is_some() {
 825            return;
 826        }
 827
 828        let (path_intermediate_texture, path_intermediate_view) = {
 829            let (t, v) = Self::create_path_intermediate(
 830                &self.device,
 831                self.surface_config.format,
 832                self.surface_config.width,
 833                self.surface_config.height,
 834            );
 835            (Some(t), Some(v))
 836        };
 837        self.path_intermediate_texture = path_intermediate_texture;
 838        self.path_intermediate_view = path_intermediate_view;
 839
 840        let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
 841            &self.device,
 842            self.surface_config.format,
 843            self.surface_config.width,
 844            self.surface_config.height,
 845            self.rendering_params.path_sample_count,
 846        )
 847        .map(|(t, v)| (Some(t), Some(v)))
 848        .unwrap_or((None, None));
 849        self.path_msaa_texture = path_msaa_texture;
 850        self.path_msaa_view = path_msaa_view;
 851    }
 852
 853    pub fn update_transparency(&mut self, transparent: bool) {
 854        let new_alpha_mode = if transparent {
 855            self.transparent_alpha_mode
 856        } else {
 857            self.opaque_alpha_mode
 858        };
 859
 860        if new_alpha_mode != self.surface_config.alpha_mode {
 861            self.surface_config.alpha_mode = new_alpha_mode;
 862            self.surface.configure(&self.device, &self.surface_config);
 863            self.pipelines = Self::create_pipelines(
 864                &self.device,
 865                &self.bind_group_layouts,
 866                self.surface_config.format,
 867                self.surface_config.alpha_mode,
 868                self.rendering_params.path_sample_count,
 869                self.dual_source_blending,
 870            );
 871        }
 872    }
 873
 874    #[allow(dead_code)]
 875    pub fn viewport_size(&self) -> Size<DevicePixels> {
 876        Size {
 877            width: DevicePixels(self.surface_config.width as i32),
 878            height: DevicePixels(self.surface_config.height as i32),
 879        }
 880    }
 881
    /// Shared handle to the sprite atlas this renderer samples glyphs and
    /// images from.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
 885
 886    pub fn gpu_specs(&self) -> GpuSpecs {
 887        GpuSpecs {
 888            is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
 889            device_name: self.adapter_info.name.clone(),
 890            driver_name: self.adapter_info.driver.clone(),
 891            driver_info: self.adapter_info.driver_info.clone(),
 892        }
 893    }
 894
    /// Maximum texture dimension supported by the device; used to clamp
    /// surface sizes (see `update_drawable_size`).
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
 898
    /// Renders one frame: acquires the swapchain texture, records every
    /// primitive batch in `scene` into a single command encoder, submits it,
    /// and presents.
    ///
    /// Path batches are rasterized into an offscreen intermediate texture,
    /// which requires ending the main render pass, running the path pass,
    /// and resuming the main pass with `LoadOp::Load`.
    ///
    /// If the shared instance buffer overflows mid-frame, the partially
    /// recorded encoder is discarded, the buffer is grown, and the whole
    /// frame is re-recorded from scratch (bounded by a 256 MiB capacity cap).
    pub fn draw(&mut self, scene: &Scene) {
        self.atlas.before_frame();

        let frame = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                // Surface needs reconfiguration (e.g. after a resize); skip
                // this frame and let the next draw retry acquisition.
                self.surface.configure(&self.device, &self.surface_config);
                return;
            }
            Err(e) => {
                log::error!("Failed to acquire surface texture: {e}");
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        // Per-frame uniforms: gamma/contrast parameters for text rendering.
        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // The path rasterization pass always renders with straight alpha into
        // the intermediate texture, regardless of the surface's alpha mode.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload all three uniform blocks into their slots of the shared
        // globals buffer before any pass is recorded.
        self.queue
            .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
        self.queue.write_buffer(
            &self.globals_buffer,
            self.path_globals_offset,
            bytemuck::bytes_of(&path_globals),
        );
        self.queue.write_buffer(
            &self.globals_buffer,
            self.gamma_offset,
            bytemuck::bytes_of(&gamma_params),
        );

        // Record-and-retry loop: re-records the entire frame after growing
        // the instance buffer on overflow.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder = self
                .device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("main_encoder"),
                });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw_* helper returns false on instance-buffer
                // overflow, which aborts this recording attempt.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths render into an offscreen texture first, so
                            // the main pass must be ended before the path pass
                            // can be recorded on the same encoder.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            // Resume the main pass, preserving what has been
                            // drawn so far (LoadOp::Load, not Clear).
                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Discard the partial encoder, grow the buffer, re-record.
                drop(encoder);
                if self.instance_buffer_capacity >= 256 * 1024 * 1024 {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    // Present anyway so the swapchain doesn't stall.
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.queue.submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1094
1095    fn draw_quads(
1096        &self,
1097        quads: &[Quad],
1098        instance_offset: &mut u64,
1099        pass: &mut wgpu::RenderPass<'_>,
1100    ) -> bool {
1101        let data = unsafe { Self::instance_bytes(quads) };
1102        self.draw_instances(
1103            data,
1104            quads.len() as u32,
1105            &self.pipelines.quads,
1106            instance_offset,
1107            pass,
1108        )
1109    }
1110
1111    fn draw_shadows(
1112        &self,
1113        shadows: &[Shadow],
1114        instance_offset: &mut u64,
1115        pass: &mut wgpu::RenderPass<'_>,
1116    ) -> bool {
1117        let data = unsafe { Self::instance_bytes(shadows) };
1118        self.draw_instances(
1119            data,
1120            shadows.len() as u32,
1121            &self.pipelines.shadows,
1122            instance_offset,
1123            pass,
1124        )
1125    }
1126
1127    fn draw_underlines(
1128        &self,
1129        underlines: &[Underline],
1130        instance_offset: &mut u64,
1131        pass: &mut wgpu::RenderPass<'_>,
1132    ) -> bool {
1133        let data = unsafe { Self::instance_bytes(underlines) };
1134        self.draw_instances(
1135            data,
1136            underlines.len() as u32,
1137            &self.pipelines.underlines,
1138            instance_offset,
1139            pass,
1140        )
1141    }
1142
1143    fn draw_monochrome_sprites(
1144        &self,
1145        sprites: &[MonochromeSprite],
1146        texture_id: AtlasTextureId,
1147        instance_offset: &mut u64,
1148        pass: &mut wgpu::RenderPass<'_>,
1149    ) -> bool {
1150        let tex_info = self.atlas.get_texture_info(texture_id);
1151        let data = unsafe { Self::instance_bytes(sprites) };
1152        self.draw_instances_with_texture(
1153            data,
1154            sprites.len() as u32,
1155            &tex_info.view,
1156            &self.pipelines.mono_sprites,
1157            instance_offset,
1158            pass,
1159        )
1160    }
1161
1162    fn draw_subpixel_sprites(
1163        &self,
1164        sprites: &[SubpixelSprite],
1165        texture_id: AtlasTextureId,
1166        instance_offset: &mut u64,
1167        pass: &mut wgpu::RenderPass<'_>,
1168    ) -> bool {
1169        let tex_info = self.atlas.get_texture_info(texture_id);
1170        let data = unsafe { Self::instance_bytes(sprites) };
1171        let pipeline = self
1172            .pipelines
1173            .subpixel_sprites
1174            .as_ref()
1175            .unwrap_or(&self.pipelines.mono_sprites);
1176        self.draw_instances_with_texture(
1177            data,
1178            sprites.len() as u32,
1179            &tex_info.view,
1180            pipeline,
1181            instance_offset,
1182            pass,
1183        )
1184    }
1185
1186    fn draw_polychrome_sprites(
1187        &self,
1188        sprites: &[PolychromeSprite],
1189        texture_id: AtlasTextureId,
1190        instance_offset: &mut u64,
1191        pass: &mut wgpu::RenderPass<'_>,
1192    ) -> bool {
1193        let tex_info = self.atlas.get_texture_info(texture_id);
1194        let data = unsafe { Self::instance_bytes(sprites) };
1195        self.draw_instances_with_texture(
1196            data,
1197            sprites.len() as u32,
1198            &tex_info.view,
1199            &self.pipelines.poly_sprites,
1200            instance_offset,
1201            pass,
1202        )
1203    }
1204
1205    fn draw_instances(
1206        &self,
1207        data: &[u8],
1208        instance_count: u32,
1209        pipeline: &wgpu::RenderPipeline,
1210        instance_offset: &mut u64,
1211        pass: &mut wgpu::RenderPass<'_>,
1212    ) -> bool {
1213        if instance_count == 0 {
1214            return true;
1215        }
1216        let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1217            return false;
1218        };
1219        let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1220            label: None,
1221            layout: &self.bind_group_layouts.instances,
1222            entries: &[wgpu::BindGroupEntry {
1223                binding: 0,
1224                resource: self.instance_binding(offset, size),
1225            }],
1226        });
1227        pass.set_pipeline(pipeline);
1228        pass.set_bind_group(0, &self.globals_bind_group, &[]);
1229        pass.set_bind_group(1, &bind_group, &[]);
1230        pass.draw(0..4, 0..instance_count);
1231        true
1232    }
1233
1234    fn draw_instances_with_texture(
1235        &self,
1236        data: &[u8],
1237        instance_count: u32,
1238        texture_view: &wgpu::TextureView,
1239        pipeline: &wgpu::RenderPipeline,
1240        instance_offset: &mut u64,
1241        pass: &mut wgpu::RenderPass<'_>,
1242    ) -> bool {
1243        if instance_count == 0 {
1244            return true;
1245        }
1246        let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1247            return false;
1248        };
1249        let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1250            label: None,
1251            layout: &self.bind_group_layouts.instances_with_texture,
1252            entries: &[
1253                wgpu::BindGroupEntry {
1254                    binding: 0,
1255                    resource: self.instance_binding(offset, size),
1256                },
1257                wgpu::BindGroupEntry {
1258                    binding: 1,
1259                    resource: wgpu::BindingResource::TextureView(texture_view),
1260                },
1261                wgpu::BindGroupEntry {
1262                    binding: 2,
1263                    resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1264                },
1265            ],
1266        });
1267        pass.set_pipeline(pipeline);
1268        pass.set_bind_group(0, &self.globals_bind_group, &[]);
1269        pass.set_bind_group(1, &bind_group, &[]);
1270        pass.draw(0..4, 0..instance_count);
1271        true
1272    }
1273
    /// Reinterprets a slice of instance structs as raw bytes for upload into
    /// the GPU instance buffer.
    ///
    /// # Safety
    /// The caller must ensure `T`'s in-memory layout matches what the shaders
    /// expect for this instance type (the instance types used here are
    /// `#[repr(C)]`). Any padding bytes within `T` are uploaded as-is.
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        unsafe {
            // SAFETY: pointer and byte length are derived from a valid `&[T]`,
            // so the byte view covers exactly the slice's memory and inherits
            // its lifetime.
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1282
1283    fn draw_paths_from_intermediate(
1284        &self,
1285        paths: &[Path<ScaledPixels>],
1286        instance_offset: &mut u64,
1287        pass: &mut wgpu::RenderPass<'_>,
1288    ) -> bool {
1289        let first_path = &paths[0];
1290        let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1291        {
1292            paths
1293                .iter()
1294                .map(|p| PathSprite {
1295                    bounds: p.clipped_bounds(),
1296                })
1297                .collect()
1298        } else {
1299            let mut bounds = first_path.clipped_bounds();
1300            for path in paths.iter().skip(1) {
1301                bounds = bounds.union(&path.clipped_bounds());
1302            }
1303            vec![PathSprite { bounds }]
1304        };
1305
1306        let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1307            return true;
1308        };
1309
1310        let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1311        self.draw_instances_with_texture(
1312            sprite_data,
1313            sprites.len() as u32,
1314            path_intermediate_view,
1315            &self.pipelines.paths,
1316            instance_offset,
1317            pass,
1318        )
1319    }
1320
1321    fn draw_paths_to_intermediate(
1322        &self,
1323        encoder: &mut wgpu::CommandEncoder,
1324        paths: &[Path<ScaledPixels>],
1325        instance_offset: &mut u64,
1326    ) -> bool {
1327        let mut vertices = Vec::new();
1328        for path in paths {
1329            let bounds = path.clipped_bounds();
1330            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
1331                xy_position: v.xy_position,
1332                st_position: v.st_position,
1333                color: path.color,
1334                bounds,
1335            }));
1336        }
1337
1338        if vertices.is_empty() {
1339            return true;
1340        }
1341
1342        let vertex_data = unsafe { Self::instance_bytes(&vertices) };
1343        let Some((vertex_offset, vertex_size)) =
1344            self.write_to_instance_buffer(instance_offset, vertex_data)
1345        else {
1346            return false;
1347        };
1348
1349        let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1350            label: Some("path_rasterization_bind_group"),
1351            layout: &self.bind_group_layouts.instances,
1352            entries: &[wgpu::BindGroupEntry {
1353                binding: 0,
1354                resource: self.instance_binding(vertex_offset, vertex_size),
1355            }],
1356        });
1357
1358        let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1359            return true;
1360        };
1361
1362        let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
1363            (msaa_view, Some(path_intermediate_view))
1364        } else {
1365            (path_intermediate_view, None)
1366        };
1367
1368        {
1369            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1370                label: Some("path_rasterization_pass"),
1371                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1372                    view: target_view,
1373                    resolve_target,
1374                    ops: wgpu::Operations {
1375                        load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1376                        store: wgpu::StoreOp::Store,
1377                    },
1378                    depth_slice: None,
1379                })],
1380                depth_stencil_attachment: None,
1381                ..Default::default()
1382            });
1383
1384            pass.set_pipeline(&self.pipelines.path_rasterization);
1385            pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
1386            pass.set_bind_group(1, &data_bind_group, &[]);
1387            pass.draw(0..vertices.len() as u32, 0..1);
1388        }
1389
1390        true
1391    }
1392
1393    fn grow_instance_buffer(&mut self) {
1394        let new_capacity = self.instance_buffer_capacity * 2;
1395        log::info!("increased instance buffer size to {}", new_capacity);
1396        self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1397            label: Some("instance_buffer"),
1398            size: new_capacity,
1399            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1400            mapped_at_creation: false,
1401        });
1402        self.instance_buffer_capacity = new_capacity;
1403    }
1404
1405    fn write_to_instance_buffer(
1406        &self,
1407        instance_offset: &mut u64,
1408        data: &[u8],
1409    ) -> Option<(u64, NonZeroU64)> {
1410        let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1411        let size = (data.len() as u64).max(16);
1412        if offset + size > self.instance_buffer_capacity {
1413            return None;
1414        }
1415        self.queue.write_buffer(&self.instance_buffer, offset, data);
1416        *instance_offset = offset + size;
1417        Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1418    }
1419
    /// Builds a buffer binding for the sub-range of the instance buffer
    /// reserved by a prior `write_to_instance_buffer` call.
    fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
        wgpu::BindingResource::Buffer(wgpu::BufferBinding {
            buffer: &self.instance_buffer,
            offset,
            size: Some(size),
        })
    }
1427
    /// Tears down renderer resources. Intentionally a no-op: every wgpu
    /// object held by this struct is released when it is dropped.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1431}
1432
/// Rendering parameters resolved once at renderer creation, from adapter
/// capabilities and environment-variable overrides.
struct RenderingParameters {
    // MSAA sample count for path rasterization (4, 2, or 1, depending on what
    // the surface format supports).
    path_sample_count: u32,
    // Precomputed gamma-correction ratios for text rendering.
    gamma_ratios: [f32; 4],
    // Contrast boost applied to grayscale-antialiased text.
    grayscale_enhanced_contrast: f32,
    // Contrast boost applied to subpixel-antialiased text.
    subpixel_enhanced_contrast: f32,
}
1439
1440impl RenderingParameters {
1441    fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1442        use std::env;
1443
1444        let format_features = adapter.get_texture_format_features(surface_format);
1445        let path_sample_count = [4, 2, 1]
1446            .into_iter()
1447            .find(|&n| format_features.flags.sample_count_supported(n))
1448            .unwrap_or(1);
1449
1450        let gamma = env::var("ZED_FONTS_GAMMA")
1451            .ok()
1452            .and_then(|v| v.parse().ok())
1453            .unwrap_or(1.8_f32)
1454            .clamp(1.0, 2.2);
1455        let gamma_ratios = get_gamma_correction_ratios(gamma);
1456
1457        let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1458            .ok()
1459            .and_then(|v| v.parse().ok())
1460            .unwrap_or(1.0_f32)
1461            .max(0.0);
1462
1463        let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1464            .ok()
1465            .and_then(|v| v.parse().ok())
1466            .unwrap_or(0.5_f32)
1467            .max(0.0);
1468
1469        Self {
1470            path_sample_count,
1471            gamma_ratios,
1472            grayscale_enhanced_contrast,
1473            subpixel_enhanced_contrast,
1474        }
1475    }
1476}