// wgpu_renderer.rs

   1use super::{CompositorGpuHint, WgpuAtlas, WgpuContext};
   2use crate::{
   3    AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
   4    PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
   5    Underline, get_gamma_correction_ratios,
   6};
   7use bytemuck::{Pod, Zeroable};
   8use log::warn;
   9use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
  10use std::num::NonZeroU64;
  11use std::sync::{Arc, Mutex};
  12
/// Per-frame uniform parameters shared by the shader pipelines.
///
/// Uploaded into the first section of `globals_buffer` (see `WgpuRenderer`).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    // Viewport dimensions handed to the shaders.
    viewport_size: [f32; 2],
    // Flag (presumably 0/1) telling the shaders whether the target expects
    // premultiplied alpha — set from the surface's composite alpha mode.
    premultiplied_alpha: u32,
    // Padding so the struct size stays a multiple of 16 bytes for uniform layout.
    pad: u32,
}
  20
/// Plain-old-data mirror of `Bounds<ScaledPixels>` suitable for direct GPU upload:
/// `origin` is `[x, y]` and `size` is `[width, height]`, all as raw `f32`s.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    origin: [f32; 2],
    size: [f32; 2],
}
  27
  28impl From<Bounds<ScaledPixels>> for PodBounds {
  29    fn from(bounds: Bounds<ScaledPixels>) -> Self {
  30        Self {
  31            origin: [bounds.origin.x.0, bounds.origin.y.0],
  32            size: [bounds.size.width.0, bounds.size.height.0],
  33        }
  34    }
  35}
  36
/// Uniform data for the surface-compositing pipeline: where to draw the surface
/// and the clip rectangle to apply.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    // Destination rectangle on screen.
    bounds: PodBounds,
    // Clipping rectangle; content outside it is masked.
    content_mask: PodBounds,
}
  43
/// Text gamma-correction parameters, uploaded once into the gamma section of
/// `globals_buffer` and bound at binding 1 of the globals bind group.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    // Correction ratios — presumably produced by `get_gamma_correction_ratios`
    // (imported above); confirm against the upload site.
    gamma_ratios: [f32; 4],
    // Enhanced-contrast factor for grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    // Enhanced-contrast factor for subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    // Padding to a 16-byte multiple for uniform layout.
    _pad: [f32; 2],
}
  52
/// Instance data for compositing an already-rasterized path onto the surface:
/// just the on-screen bounds to cover.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
  58
/// Vertex format consumed by the path-rasterization pipeline.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    // Vertex position in scaled pixels.
    xy_position: Point<ScaledPixels>,
    // Parametric coordinate — presumably used by the shader for curve coverage;
    // TODO confirm against shaders.wgsl.
    st_position: Point<f32>,
    // Fill for this path.
    color: Background,
    // Bounds of the whole path this vertex belongs to.
    bounds: Bounds<ScaledPixels>,
}
  67
/// Initial configuration for a renderer's window surface.
pub struct WgpuSurfaceConfig {
    /// Requested drawable size in device pixels. Clamped to the device's
    /// maximum texture dimension at surface-configuration time.
    pub size: Size<DevicePixels>,
    /// Whether the window wants a transparent surface (selects the
    /// premultiplied/inherit alpha mode instead of opaque).
    pub transparent: bool,
}
  72
/// One render pipeline per primitive kind; built once in `create_pipelines`.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    // Rasterizes path geometry (possibly multisampled) into the intermediate texture.
    path_rasterization: wgpu::RenderPipeline,
    // Composites the rasterized path texture onto the surface.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    // Present only when the device supports dual-source blending.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
  85
/// The bind group layouts shared by the pipelines; see
/// `create_bind_group_layouts` for the exact bindings in each.
struct WgpuBindGroupLayouts {
    // GlobalParams + GammaParams uniforms (group 0 of every pipeline).
    globals: wgpu::BindGroupLayout,
    // A single read-only storage buffer of instance data.
    instances: wgpu::BindGroupLayout,
    // Instance storage buffer plus a sampled 2D texture and filtering sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    // SurfaceParams uniform plus two sampled textures and a sampler.
    surfaces: wgpu::BindGroupLayout,
}
  92
/// wgpu-backed renderer for a single window surface.
///
/// Owns the swapchain surface, all render pipelines and bind group layouts,
/// the sprite atlas, and the uniform/instance buffers used for drawing.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    // Whether `surface.configure` has been applied with the current config.
    surface_configured: bool,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    // One uniform buffer with three sections: GlobalParams at offset 0, a
    // second GlobalParams for path rendering at `path_globals_offset`, and
    // GammaParams at `gamma_offset`. Offsets respect the device's minimum
    // uniform-buffer offset alignment.
    globals_buffer: wgpu::Buffer,
    path_globals_offset: u64,
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    // Storage buffer for per-primitive instance data; current byte size is
    // tracked in `instance_buffer_capacity`.
    instance_buffer: wgpu::Buffer,
    instance_buffer_capacity: u64,
    storage_buffer_alignment: u64,
    // Offscreen target that paths are rasterized into; created lazily on the
    // first draw (see the note in `new_with_surface`).
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    // MSAA color target for path rasterization when the sample count > 1.
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    // Device supports dual-source blending (enables the subpixel text pipeline).
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    // Alpha modes resolved at creation so transparency can be switched without
    // re-querying surface capabilities.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    // Most recent uncaptured device error, recorded by the callback installed
    // in `new_with_surface`.
    last_error: Arc<Mutex<Option<String>>>,
    // Count of failed frames — maintained by drawing code not visible in this
    // chunk; TODO confirm whether it is consecutive or cumulative.
    failed_frame_count: u32,
}
 124
 125impl WgpuRenderer {
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// Reuses the wgpu instance from `gpu_context` when present; otherwise
    /// creates a new [`WgpuContext`] and stores it back into `gpu_context` for
    /// subsequent windows.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    pub fn new<W: HasWindowHandle + HasDisplayHandle>(
        gpu_context: &mut Option<WgpuContext>,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self> {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
        let display_handle = window
            .display_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            raw_display_handle: display_handle.as_raw(),
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        let instance = gpu_context
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(WgpuContext::instance);

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        let context = match gpu_context {
            Some(context) => {
                // A previously-created context must be able to present to this
                // new surface; bail out early if it cannot.
                context.check_compatible_with_surface(&surface)?;
                context
            }
            // First window: build the shared GPU context and cache it.
            None => gpu_context.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        Self::new_with_surface(context, surface, config)
    }
 176
    /// Finishes renderer construction for an already-created surface: chooses a
    /// texture format and alpha modes, configures the surface, and builds the
    /// pipelines, bind groups, sampler, and uniform/instance buffers.
    fn new_with_surface(
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Format preference: well-known non-sRGB 8-bit formats first, then any
        // non-sRGB format the surface offers, then whatever it lists first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first preferred alpha mode the surface supports, falling
        // back to the surface's first reported mode.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Resolve both modes up front so transparency can later be toggled
        // without re-querying surface capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to the device's maximum texture dimension;
        // an oversized surface would fail to configure.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                 Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            // Zero-sized surfaces are invalid; keep at least 1x1.
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Lay out three sections in one uniform buffer — GlobalParams at 0, a
        // second GlobalParams for path rendering, then GammaParams — each
        // aligned to the device's minimum uniform-buffer offset alignment.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Two bind groups over the same buffer: one reads the main globals
        // section, the other the path globals section; both share the gamma
        // section at binding 1.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        // Struct sizes are non-zero, so these unwraps cannot fail.
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured device errors so later frame code can inspect them
        // instead of losing them to the default panic/log behavior.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        Ok(Self {
            device,
            queue,
            surface,
            surface_config,
            surface_configured: true,
            pipelines,
            bind_group_layouts,
            atlas,
            atlas_sampler,
            globals_buffer,
            path_globals_offset,
            gamma_offset,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            storage_buffer_alignment,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
        })
    }
 394
    /// Creates the four bind group layouts shared by every pipeline:
    /// - `globals`: GlobalParams (binding 0) + GammaParams (binding 1) uniforms;
    /// - `instances`: one read-only storage buffer of instance data;
    /// - `instances_with_texture`: the storage buffer plus a sampled 2D texture
    ///   and filtering sampler;
    /// - `surfaces`: a SurfaceParams uniform, two sampled 2D textures, and a
    ///   filtering sampler.
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    // Gamma/contrast parameters are only read in fragment shaders.
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Shared shape for the read-only storage buffers that carry instance data.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                // Two sampled 2D textures for the surface fragment shader
                // (bindings 1 and 2) — presumably separate image planes;
                // confirm against the fs_surface shader.
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
 518
    /// Compiles the WGSL shader module and builds every render pipeline.
    ///
    /// All pipelines share the same setup — no vertex buffers (instance data
    /// comes from storage buffers) and a two-group layout (globals + data) —
    /// differing only in shader entry points, blend state, topology, and
    /// sample count.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });

        // Match the surface's compositing expectation: premultiplied blending
        // when the surface alpha mode is premultiplied, straight alpha otherwise.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        // Default color target used by most pipelines.
        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Shared pipeline builder: same shader module, empty vertex buffer
        // list, CCW front face, no culling, no depth/stencil.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Stage 1 of path rendering: rasterize triangles (possibly
        // multisampled) with premultiplied blending into the intermediate
        // texture, which shares the surface format.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
        );

        // Stage 2 blend: color combines as One/OneMinusSrcAlpha
        // (premultiplied-source style); alpha accumulates additively (One/One).
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Subpixel text requires dual-source blending: the fragment shader's
        // second output (Src1 / OneMinusSrc1) supplies the blend factors, and
        // only the color channels are written (alpha left untouched).
        let subpixel_sprites = if dual_source_blending {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
 748
 749    fn create_path_intermediate(
 750        device: &wgpu::Device,
 751        format: wgpu::TextureFormat,
 752        width: u32,
 753        height: u32,
 754    ) -> (wgpu::Texture, wgpu::TextureView) {
 755        let texture = device.create_texture(&wgpu::TextureDescriptor {
 756            label: Some("path_intermediate"),
 757            size: wgpu::Extent3d {
 758                width: width.max(1),
 759                height: height.max(1),
 760                depth_or_array_layers: 1,
 761            },
 762            mip_level_count: 1,
 763            sample_count: 1,
 764            dimension: wgpu::TextureDimension::D2,
 765            format,
 766            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
 767            view_formats: &[],
 768        });
 769        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
 770        (texture, view)
 771    }
 772
 773    fn create_msaa_if_needed(
 774        device: &wgpu::Device,
 775        format: wgpu::TextureFormat,
 776        width: u32,
 777        height: u32,
 778        sample_count: u32,
 779    ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
 780        if sample_count <= 1 {
 781            return None;
 782        }
 783        let texture = device.create_texture(&wgpu::TextureDescriptor {
 784            label: Some("path_msaa"),
 785            size: wgpu::Extent3d {
 786                width: width.max(1),
 787                height: height.max(1),
 788                depth_or_array_layers: 1,
 789            },
 790            mip_level_count: 1,
 791            sample_count,
 792            dimension: wgpu::TextureDimension::D2,
 793            format,
 794            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
 795            view_formats: &[],
 796        });
 797        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
 798        Some((texture, view))
 799    }
 800
    /// Resizes the surface to `size`, clamping to the device's maximum texture
    /// dimension. No-op when the size is unchanged.
    ///
    /// Order matters here: wait for in-flight GPU work, destroy the old
    /// intermediate textures, reconfigure the surface, then invalidate the
    /// cached texture handles so `draw()` recreates them lazily.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                     Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = self.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = self.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = self.path_msaa_texture {
                texture.destroy();
            }

            // Surface dimensions must be at least 1 even after clamping.
            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            if self.surface_configured {
                self.surface.configure(&self.device, &self.surface_config);
            }

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            self.path_intermediate_texture = None;
            self.path_intermediate_view = None;
            self.path_msaa_texture = None;
            self.path_msaa_view = None;
        }
    }
 848
 849    fn ensure_intermediate_textures(&mut self) {
 850        if self.path_intermediate_texture.is_some() {
 851            return;
 852        }
 853
 854        let (path_intermediate_texture, path_intermediate_view) = {
 855            let (t, v) = Self::create_path_intermediate(
 856                &self.device,
 857                self.surface_config.format,
 858                self.surface_config.width,
 859                self.surface_config.height,
 860            );
 861            (Some(t), Some(v))
 862        };
 863        self.path_intermediate_texture = path_intermediate_texture;
 864        self.path_intermediate_view = path_intermediate_view;
 865
 866        let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
 867            &self.device,
 868            self.surface_config.format,
 869            self.surface_config.width,
 870            self.surface_config.height,
 871            self.rendering_params.path_sample_count,
 872        )
 873        .map(|(t, v)| (Some(t), Some(v)))
 874        .unwrap_or((None, None));
 875        self.path_msaa_texture = path_msaa_texture;
 876        self.path_msaa_view = path_msaa_view;
 877    }
 878
 879    pub fn update_transparency(&mut self, transparent: bool) {
 880        let new_alpha_mode = if transparent {
 881            self.transparent_alpha_mode
 882        } else {
 883            self.opaque_alpha_mode
 884        };
 885
 886        if new_alpha_mode != self.surface_config.alpha_mode {
 887            self.surface_config.alpha_mode = new_alpha_mode;
 888            if self.surface_configured {
 889                self.surface.configure(&self.device, &self.surface_config);
 890            }
 891            self.pipelines = Self::create_pipelines(
 892                &self.device,
 893                &self.bind_group_layouts,
 894                self.surface_config.format,
 895                self.surface_config.alpha_mode,
 896                self.rendering_params.path_sample_count,
 897                self.dual_source_blending,
 898            );
 899        }
 900    }
 901
 902    #[allow(dead_code)]
 903    pub fn viewport_size(&self) -> Size<DevicePixels> {
 904        Size {
 905            width: DevicePixels(self.surface_config.width as i32),
 906            height: DevicePixels(self.surface_config.height as i32),
 907        }
 908    }
 909
    /// Shared handle to the sprite atlas backing this renderer.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
 913
    /// Summarizes the active adapter (name, driver, software/hardware) for
    /// diagnostics.
    pub fn gpu_specs(&self) -> GpuSpecs {
        GpuSpecs {
            // A `Cpu` device type means a software rasterizer is in use
            // rather than real GPU hardware.
            is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
            device_name: self.adapter_info.name.clone(),
            driver_name: self.adapter_info.driver.clone(),
            driver_info: self.adapter_info.driver_info.clone(),
        }
    }
 922
    /// Maximum texture dimension supported by the device, in pixels.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
 926
    /// Renders a complete `Scene` into the current surface texture.
    ///
    /// Asynchronous GPU errors accumulated in `last_error` (presumably set by
    /// an uncaptured-error handler elsewhere — confirm) are tolerated for up
    /// to 20 consecutive frames before panicking. The encoding loop retries
    /// the whole frame after growing the instance buffer on overflow.
    pub fn draw(&mut self, scene: &Scene) {
        // Check for GPU errors reported since the previous frame.
        let last_error = self.last_error.lock().unwrap().take();
        if let Some(error) = last_error {
            self.failed_frame_count += 1;
            log::error!(
                "GPU error during frame (failure {} of 20): {error}",
                self.failed_frame_count
            );
            if self.failed_frame_count > 20 {
                panic!("Too many consecutive GPU errors. Last error: {error}");
            }
        } else {
            // A clean frame resets the consecutive-failure counter.
            self.failed_frame_count = 0;
        }

        self.atlas.before_frame();

        let frame = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                // The surface needs reconfiguring; skip this frame and let a
                // later resize/draw reconfigure it.
                self.surface_configured = false;
                return;
            }
            Err(e) => {
                log::error!("Failed to acquire surface texture: {e}");
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            // Tell the shaders whether the compositor expects premultiplied
            // alpha output.
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // Path rasterization always writes non-premultiplied alpha into the
        // intermediate texture, regardless of the surface alpha mode.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload all three uniform blocks into the shared globals buffer at
        // their pre-computed offsets.
        self.queue
            .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
        self.queue.write_buffer(
            &self.globals_buffer,
            self.path_globals_offset,
            bytemuck::bytes_of(&path_globals),
        );
        self.queue.write_buffer(
            &self.globals_buffer,
            self.gamma_offset,
            bytemuck::bytes_of(&gamma_params),
        );

        // Encode the frame. If any batch overflows the instance buffer, the
        // buffer is grown and the entire frame is re-encoded from scratch.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder = self
                .device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("main_encoder"),
                });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw helper returns false on instance-buffer overflow.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths are drawn in two stages: rasterize into
                            // the intermediate texture (a separate render
                            // pass), then composite into the main pass. The
                            // main pass must be ended and re-begun (with Load
                            // instead of Clear) around the rasterization pass.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Discard this encoder; nothing from it is submitted.
                drop(encoder);
                if self.instance_buffer_capacity >= 256 * 1024 * 1024 {
                    // Refuse to grow without bound; release the surface
                    // texture and give up on this frame.
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.queue.submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1136
1137    fn draw_quads(
1138        &self,
1139        quads: &[Quad],
1140        instance_offset: &mut u64,
1141        pass: &mut wgpu::RenderPass<'_>,
1142    ) -> bool {
1143        let data = unsafe { Self::instance_bytes(quads) };
1144        self.draw_instances(
1145            data,
1146            quads.len() as u32,
1147            &self.pipelines.quads,
1148            instance_offset,
1149            pass,
1150        )
1151    }
1152
1153    fn draw_shadows(
1154        &self,
1155        shadows: &[Shadow],
1156        instance_offset: &mut u64,
1157        pass: &mut wgpu::RenderPass<'_>,
1158    ) -> bool {
1159        let data = unsafe { Self::instance_bytes(shadows) };
1160        self.draw_instances(
1161            data,
1162            shadows.len() as u32,
1163            &self.pipelines.shadows,
1164            instance_offset,
1165            pass,
1166        )
1167    }
1168
1169    fn draw_underlines(
1170        &self,
1171        underlines: &[Underline],
1172        instance_offset: &mut u64,
1173        pass: &mut wgpu::RenderPass<'_>,
1174    ) -> bool {
1175        let data = unsafe { Self::instance_bytes(underlines) };
1176        self.draw_instances(
1177            data,
1178            underlines.len() as u32,
1179            &self.pipelines.underlines,
1180            instance_offset,
1181            pass,
1182        )
1183    }
1184
1185    fn draw_monochrome_sprites(
1186        &self,
1187        sprites: &[MonochromeSprite],
1188        texture_id: AtlasTextureId,
1189        instance_offset: &mut u64,
1190        pass: &mut wgpu::RenderPass<'_>,
1191    ) -> bool {
1192        let tex_info = self.atlas.get_texture_info(texture_id);
1193        let data = unsafe { Self::instance_bytes(sprites) };
1194        self.draw_instances_with_texture(
1195            data,
1196            sprites.len() as u32,
1197            &tex_info.view,
1198            &self.pipelines.mono_sprites,
1199            instance_offset,
1200            pass,
1201        )
1202    }
1203
1204    fn draw_subpixel_sprites(
1205        &self,
1206        sprites: &[SubpixelSprite],
1207        texture_id: AtlasTextureId,
1208        instance_offset: &mut u64,
1209        pass: &mut wgpu::RenderPass<'_>,
1210    ) -> bool {
1211        let tex_info = self.atlas.get_texture_info(texture_id);
1212        let data = unsafe { Self::instance_bytes(sprites) };
1213        let pipeline = self
1214            .pipelines
1215            .subpixel_sprites
1216            .as_ref()
1217            .unwrap_or(&self.pipelines.mono_sprites);
1218        self.draw_instances_with_texture(
1219            data,
1220            sprites.len() as u32,
1221            &tex_info.view,
1222            pipeline,
1223            instance_offset,
1224            pass,
1225        )
1226    }
1227
1228    fn draw_polychrome_sprites(
1229        &self,
1230        sprites: &[PolychromeSprite],
1231        texture_id: AtlasTextureId,
1232        instance_offset: &mut u64,
1233        pass: &mut wgpu::RenderPass<'_>,
1234    ) -> bool {
1235        let tex_info = self.atlas.get_texture_info(texture_id);
1236        let data = unsafe { Self::instance_bytes(sprites) };
1237        self.draw_instances_with_texture(
1238            data,
1239            sprites.len() as u32,
1240            &tex_info.view,
1241            &self.pipelines.poly_sprites,
1242            instance_offset,
1243            pass,
1244        )
1245    }
1246
1247    fn draw_instances(
1248        &self,
1249        data: &[u8],
1250        instance_count: u32,
1251        pipeline: &wgpu::RenderPipeline,
1252        instance_offset: &mut u64,
1253        pass: &mut wgpu::RenderPass<'_>,
1254    ) -> bool {
1255        if instance_count == 0 {
1256            return true;
1257        }
1258        let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1259            return false;
1260        };
1261        let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1262            label: None,
1263            layout: &self.bind_group_layouts.instances,
1264            entries: &[wgpu::BindGroupEntry {
1265                binding: 0,
1266                resource: self.instance_binding(offset, size),
1267            }],
1268        });
1269        pass.set_pipeline(pipeline);
1270        pass.set_bind_group(0, &self.globals_bind_group, &[]);
1271        pass.set_bind_group(1, &bind_group, &[]);
1272        pass.draw(0..4, 0..instance_count);
1273        true
1274    }
1275
1276    fn draw_instances_with_texture(
1277        &self,
1278        data: &[u8],
1279        instance_count: u32,
1280        texture_view: &wgpu::TextureView,
1281        pipeline: &wgpu::RenderPipeline,
1282        instance_offset: &mut u64,
1283        pass: &mut wgpu::RenderPass<'_>,
1284    ) -> bool {
1285        if instance_count == 0 {
1286            return true;
1287        }
1288        let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1289            return false;
1290        };
1291        let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1292            label: None,
1293            layout: &self.bind_group_layouts.instances_with_texture,
1294            entries: &[
1295                wgpu::BindGroupEntry {
1296                    binding: 0,
1297                    resource: self.instance_binding(offset, size),
1298                },
1299                wgpu::BindGroupEntry {
1300                    binding: 1,
1301                    resource: wgpu::BindingResource::TextureView(texture_view),
1302                },
1303                wgpu::BindGroupEntry {
1304                    binding: 2,
1305                    resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1306                },
1307            ],
1308        });
1309        pass.set_pipeline(pipeline);
1310        pass.set_bind_group(0, &self.globals_bind_group, &[]);
1311        pass.set_bind_group(1, &bind_group, &[]);
1312        pass.draw(0..4, 0..instance_count);
1313        true
1314    }
1315
    /// Reinterprets a slice of instance structs as raw bytes for upload into
    /// the instance storage buffer.
    ///
    /// # Safety
    /// Callers must only pass slices of plain-old-data instance types whose
    /// every byte is initialized. NOTE(review): a `T` with internal padding
    /// would make this read uninitialized padding bytes — confirm all
    /// instance types used here are padding-free (or switch them to
    /// `bytemuck::Pod` like `GlobalParams` et al.).
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        unsafe {
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1324
    /// Composites already-rasterized paths from the intermediate texture into
    /// the main render pass.
    ///
    /// Expects `paths` to be non-empty; the caller (`draw`) skips empty
    /// batches before calling this.
    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
        pass: &mut wgpu::RenderPass<'_>,
    ) -> bool {
        let first_path = &paths[0];
        // If the first and last paths share the same draw order, composite
        // each path as its own sprite; otherwise collapse the batch into one
        // sprite covering the union of all clipped bounds — presumably to
        // keep blending correct across differing orders (TODO confirm).
        let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
        {
            paths
                .iter()
                .map(|p| PathSprite {
                    bounds: p.clipped_bounds(),
                })
                .collect()
        } else {
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            vec![PathSprite { bounds }]
        };

        // Without an intermediate texture there is nothing to sample from;
        // treat the batch as successfully handled.
        let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
            return true;
        };

        let sprite_data = unsafe { Self::instance_bytes(&sprites) };
        self.draw_instances_with_texture(
            sprite_data,
            sprites.len() as u32,
            path_intermediate_view,
            &self.pipelines.paths,
            instance_offset,
            pass,
        )
    }
1362
    /// Rasterizes a batch of paths into the intermediate texture in its own
    /// render pass, optionally through the MSAA target with a resolve into
    /// the single-sample intermediate.
    ///
    /// Returns false only when the instance buffer is out of space; an empty
    /// vertex set or a missing intermediate texture is treated as success.
    fn draw_paths_to_intermediate(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
    ) -> bool {
        // Flatten every path into one vertex list, tagging each vertex with
        // its path's color and clipped bounds.
        let mut vertices = Vec::new();
        for path in paths {
            let bounds = path.clipped_bounds();
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds,
            }));
        }

        if vertices.is_empty() {
            return true;
        }

        let vertex_data = unsafe { Self::instance_bytes(&vertices) };
        let Some((vertex_offset, vertex_size)) =
            self.write_to_instance_buffer(instance_offset, vertex_data)
        else {
            return false;
        };

        let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_rasterization_bind_group"),
            layout: &self.bind_group_layouts.instances,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: self.instance_binding(vertex_offset, vertex_size),
            }],
        });

        let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
            return true;
        };

        // With MSAA enabled, render into the multisampled target and resolve
        // into the intermediate; otherwise render into the intermediate
        // directly.
        let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
            (msaa_view, Some(path_intermediate_view))
        } else {
            (path_intermediate_view, None)
        };

        {
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("path_rasterization_pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target_view,
                    resolve_target,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                        store: wgpu::StoreOp::Store,
                    },
                    depth_slice: None,
                })],
                depth_stencil_attachment: None,
                ..Default::default()
            });

            pass.set_pipeline(&self.pipelines.path_rasterization);
            // Paths use their own globals (non-premultiplied alpha).
            pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
            pass.set_bind_group(1, &data_bind_group, &[]);
            // Non-instanced draw: one vertex per entry in the vertex list.
            pass.draw(0..vertices.len() as u32, 0..1);
        }

        true
    }
1434
1435    fn grow_instance_buffer(&mut self) {
1436        let new_capacity = self.instance_buffer_capacity * 2;
1437        log::info!("increased instance buffer size to {}", new_capacity);
1438        self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1439            label: Some("instance_buffer"),
1440            size: new_capacity,
1441            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1442            mapped_at_creation: false,
1443        });
1444        self.instance_buffer_capacity = new_capacity;
1445    }
1446
1447    fn write_to_instance_buffer(
1448        &self,
1449        instance_offset: &mut u64,
1450        data: &[u8],
1451    ) -> Option<(u64, NonZeroU64)> {
1452        let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1453        let size = (data.len() as u64).max(16);
1454        if offset + size > self.instance_buffer_capacity {
1455            return None;
1456        }
1457        self.queue.write_buffer(&self.instance_buffer, offset, data);
1458        *instance_offset = offset + size;
1459        Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1460    }
1461
1462    fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1463        wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1464            buffer: &self.instance_buffer,
1465            offset,
1466            size: Some(size),
1467        })
1468    }
1469
    /// Explicit teardown hook, kept for interface parity with other platform
    /// renderers; nothing to do here.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1473}
1474
/// Rendering knobs resolved once at startup from adapter capabilities and
/// environment-variable overrides.
struct RenderingParameters {
    // MSAA sample count for path rasterization: 4, 2, or 1 (no MSAA).
    path_sample_count: u32,
    // Gamma-correction ratios for text, derived from the configured gamma.
    gamma_ratios: [f32; 4],
    // Extra contrast applied to grayscale-antialiased text (>= 0).
    grayscale_enhanced_contrast: f32,
    // Extra contrast applied to subpixel-antialiased text (>= 0).
    subpixel_enhanced_contrast: f32,
}
1481
1482impl RenderingParameters {
1483    fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1484        use std::env;
1485
1486        let format_features = adapter.get_texture_format_features(surface_format);
1487        let path_sample_count = [4, 2, 1]
1488            .into_iter()
1489            .find(|&n| format_features.flags.sample_count_supported(n))
1490            .unwrap_or(1);
1491
1492        let gamma = env::var("ZED_FONTS_GAMMA")
1493            .ok()
1494            .and_then(|v| v.parse().ok())
1495            .unwrap_or(1.8_f32)
1496            .clamp(1.0, 2.2);
1497        let gamma_ratios = get_gamma_correction_ratios(gamma);
1498
1499        let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1500            .ok()
1501            .and_then(|v| v.parse().ok())
1502            .unwrap_or(1.0_f32)
1503            .max(0.0);
1504
1505        let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1506            .ok()
1507            .and_then(|v| v.parse().ok())
1508            .unwrap_or(0.5_f32)
1509            .max(0.0);
1510
1511        Self {
1512            path_sample_count,
1513            gamma_ratios,
1514            grayscale_enhanced_contrast,
1515            subpixel_enhanced_contrast,
1516        }
1517    }
1518}