1use crate::{CompositorGpuHint, WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use log::warn;
9#[cfg(not(target_family = "wasm"))]
10use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
11use std::cell::RefCell;
12use std::num::NonZeroU64;
13use std::rc::Rc;
14use std::sync::{Arc, Mutex};
15
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
/// Per-frame uniform parameters bound at binding 0 of the globals bind group.
/// Field order and padding must match the corresponding WGSL struct in
/// `shaders.wgsl` — confirm both sides when editing either.
struct GlobalParams {
    // Viewport size in pixels used by the vertex shaders to map to clip space.
    viewport_size: [f32; 2],
    // Boolean encoded as u32 for GPU-compatible layout: non-zero when the
    // surface composites with premultiplied alpha.
    premultiplied_alpha: u32,
    // Explicit padding so the struct is 16-byte sized/aligned for uniform use.
    pad: u32,
}
23
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
/// GPU-uploadable rectangle: `Bounds<ScaledPixels>` flattened to plain floats
/// (see the `From<Bounds<ScaledPixels>>` impl below).
struct PodBounds {
    // Top-left corner as [x, y].
    origin: [f32; 2],
    // Extent as [width, height].
    size: [f32; 2],
}
30
31impl From<Bounds<ScaledPixels>> for PodBounds {
32 fn from(bounds: Bounds<ScaledPixels>) -> Self {
33 Self {
34 origin: [bounds.origin.x.0, bounds.origin.y.0],
35 size: [bounds.size.width.0, bounds.size.height.0],
36 }
37 }
38}
39
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
/// Per-surface uniform data for the (currently unused — see the
/// `#[allow(dead_code)]` surfaces pipeline) surface compositing pass.
struct SurfaceParams {
    // Destination rectangle of the surface in the window.
    bounds: PodBounds,
    // Clip rectangle applied when drawing the surface.
    content_mask: PodBounds,
}
46
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
/// Text gamma-correction uniforms bound at binding 1 of the globals bind
/// group; values come from `get_gamma_correction_ratios` and
/// `RenderingParameters` (see `draw`). Layout must match the WGSL struct in
/// `shaders.wgsl` — confirm when editing.
struct GammaParams {
    gamma_ratios: [f32; 4],
    // Contrast boost applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    // Contrast boost applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    // Padding to a 16-byte multiple for uniform-buffer layout.
    _pad: [f32; 2],
}
55
#[derive(Clone, Debug)]
#[repr(C)]
/// Instance data for compositing one rasterized path from the intermediate
/// texture onto the frame (used by the `paths` pipeline).
struct PathSprite {
    // Screen-space rectangle the path covers.
    bounds: Bounds<ScaledPixels>,
}
61
#[derive(Clone, Debug)]
#[repr(C)]
/// Vertex fed to the `path_rasterization` pipeline, which renders path
/// geometry into the intermediate texture before compositing.
struct PathRasterizationVertex {
    // Vertex position in screen space.
    xy_position: Point<ScaledPixels>,
    // Curve-space coordinate (presumably for the quadratic-curve coverage
    // test in the fragment shader — verify against shaders.wgsl).
    st_position: Point<f32>,
    // Fill of the path this vertex belongs to.
    color: Background,
    // Bounding box of the whole path, shared by all of its vertices.
    bounds: Bounds<ScaledPixels>,
}
70
/// Caller-facing configuration for creating a renderer surface.
pub struct WgpuSurfaceConfig {
    /// Initial drawable size in device pixels (clamped to the adapter's
    /// maximum texture dimension at configure time).
    pub size: Size<DevicePixels>,
    /// Whether the window requests a transparent (premultiplied-alpha)
    /// compositing mode rather than opaque.
    pub transparent: bool,
    /// Preferred presentation mode. When `Some`, the renderer will use this
    /// mode if supported by the surface, falling back to `Fifo`.
    /// When `None`, defaults to `Fifo` (VSync).
    ///
    /// Mobile platforms may prefer `Mailbox` (triple-buffering) to avoid
    /// blocking in `get_current_texture()` during lifecycle transitions.
    pub preferred_present_mode: Option<wgpu::PresentMode>,
}
82
/// One render pipeline per primitive batch kind; all built together in
/// `create_pipelines` and rebuilt when the surface alpha mode changes.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    // Renders path geometry into the intermediate (possibly MSAA) texture.
    path_rasterization: wgpu::RenderPipeline,
    // Composites rasterized paths from the intermediate texture to the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    // Only built when the adapter supports dual-source blending.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
95
/// Bind group layouts shared by all pipelines; created once in
/// `create_bind_group_layouts` and reused across pipeline rebuilds.
struct WgpuBindGroupLayouts {
    // Group 0: GlobalParams + GammaParams uniforms.
    globals: wgpu::BindGroupLayout,
    // Group 1 variant: instance storage buffer only.
    instances: wgpu::BindGroupLayout,
    // Group 1 variant: instance storage buffer + atlas texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    // Group 1 variant for the surface compositing pass.
    surfaces: wgpu::BindGroupLayout,
}
102
/// Shared GPU context reference, used to coordinate device recovery across multiple windows.
/// `None` inside the cell means no window has initialized the context yet (see `new`).
pub type GpuContext = Rc<RefCell<Option<WgpuContext>>>;
105
/// GPU resources that must be dropped together during device recovery.
struct WgpuResources {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    // Linear sampler used for all atlas texture bind groups.
    atlas_sampler: wgpu::Sampler,
    // Single uniform buffer holding GlobalParams, path GlobalParams, and
    // GammaParams at the alignment-padded offsets computed in `new_internal`.
    globals_buffer: wgpu::Buffer,
    globals_bind_group: wgpu::BindGroup,
    // Same layout as `globals_bind_group` but pointing at the path-pass
    // GlobalParams slice of `globals_buffer`.
    path_globals_bind_group: wgpu::BindGroup,
    // Storage buffer for per-primitive instance data; grows on demand.
    instance_buffer: wgpu::Buffer,
    // Offscreen target for path rasterization. `None` until the first draw
    // (and after a resize) — see `ensure_intermediate_textures`.
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    // MSAA companion of the intermediate texture; only present when
    // `RenderingParameters::path_sample_count > 1`.
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
}
123
/// wgpu-backed renderer for one window: owns the surface, pipelines, and
/// per-frame buffers, and draws a gpui `Scene` each frame.
pub struct WgpuRenderer {
    /// Shared GPU context for device recovery coordination (unused on WASM).
    #[allow(dead_code)]
    context: Option<GpuContext>,
    /// Compositor GPU hint for adapter selection (unused on WASM).
    #[allow(dead_code)]
    compositor_gpu: Option<CompositorGpuHint>,
    // `None` only while resources are torn down (e.g. device recovery);
    // accessors panic in that state.
    resources: Option<WgpuResources>,
    surface_config: wgpu::SurfaceConfiguration,
    atlas: Arc<WgpuAtlas>,
    // Byte offsets of the path-pass GlobalParams and the GammaParams within
    // the shared globals buffer (uniform-alignment padded).
    path_globals_offset: u64,
    gamma_offset: u64,
    // Current size of `instance_buffer`, in bytes.
    instance_buffer_capacity: u64,
    // Device limits cached at creation time.
    max_buffer_size: u64,
    storage_buffer_alignment: u64,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    // Alpha modes picked once from surface capabilities, swapped in by
    // `update_transparency`.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    // Written by the device's uncaptured-error callback; drained in `draw`.
    last_error: Arc<Mutex<Option<String>>>,
    // Consecutive frames that reported a GPU error; `draw` panics past 20.
    failed_frame_count: u32,
    device_lost: std::sync::Arc<std::sync::atomic::AtomicBool>,
    // When false, `draw` bails out before acquiring a surface texture.
    surface_configured: bool,
}
150
151impl WgpuRenderer {
152 fn resources(&self) -> &WgpuResources {
153 self.resources
154 .as_ref()
155 .expect("GPU resources not available")
156 }
157
158 fn resources_mut(&mut self) -> &mut WgpuResources {
159 self.resources
160 .as_mut()
161 .expect("GPU resources not available")
162 }
163
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// The `gpu_context` is a shared reference that coordinates GPU context across
    /// multiple windows. The first window to create a renderer will initialize the
    /// context; subsequent windows will share it.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    #[cfg(not(target_family = "wasm"))]
    pub fn new<W>(
        gpu_context: GpuContext,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            // Fall back to the display handle already provided via InstanceDescriptor::display.
            raw_display_handle: None,
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        // NOTE: the immutable borrow of `gpu_context` ends at the `;` here,
        // before the mutable borrow below — keep it that way.
        let instance = gpu_context
            .borrow()
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(|| WgpuContext::instance(Box::new(window.clone())));

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        // First window initializes the shared context; later windows reuse it
        // after verifying the existing adapter can drive this surface.
        let mut ctx_ref = gpu_context.borrow_mut();
        let context = match ctx_ref.as_mut() {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => ctx_ref.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        let atlas = Arc::new(WgpuAtlas::new(
            Arc::clone(&context.device),
            Arc::clone(&context.queue),
        ));

        Self::new_internal(
            Some(Rc::clone(&gpu_context)),
            context,
            surface,
            config,
            compositor_gpu,
            atlas,
        )
    }
234
235 #[cfg(target_family = "wasm")]
236 pub fn new_from_canvas(
237 context: &WgpuContext,
238 canvas: &web_sys::HtmlCanvasElement,
239 config: WgpuSurfaceConfig,
240 ) -> anyhow::Result<Self> {
241 let surface = context
242 .instance
243 .create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))
244 .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?;
245
246 let atlas = Arc::new(WgpuAtlas::new(
247 Arc::clone(&context.device),
248 Arc::clone(&context.queue),
249 ));
250
251 Self::new_internal(None, context, surface, config, None, atlas)
252 }
253
    /// Shared constructor: picks a surface format and alpha modes, configures
    /// the surface, builds pipelines/layouts/buffers, and assembles the
    /// renderer. Called by both the native and WASM entry points.
    fn new_internal(
        gpu_context: Option<GpuContext>,
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
        atlas: Arc<WgpuAtlas>,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer non-sRGB 8-bit formats; fall back to any non-sRGB format the
        // surface reports, then to whatever is first in the capability list.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first preferred alpha mode the surface supports, falling
        // back to the surface's first reported mode.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Both modes are resolved up front so `update_transparency` can swap
        // between them later without re-querying capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to the device's texture limit; the same
        // clamping (and warning) is repeated in `update_drawable_size`.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            // `.max(1)`: zero-sized surfaces are invalid to configure.
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: config
                .preferred_present_mode
                .filter(|mode| surface_caps.present_modes.contains(mode))
                .unwrap_or(wgpu::PresentMode::Fifo),
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // The globals buffer packs three uniform slices back-to-back, each
        // aligned to min_uniform_buffer_offset_alignment:
        //   [GlobalParams | pad | GlobalParams (path pass) | pad | GammaParams]
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let max_buffer_size = device.limits().max_buffer_size;
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        // 2 MiB starting capacity; grown on demand up to `max_buffer_size`.
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Main-pass globals: GlobalParams at offset 0 plus the shared
        // GammaParams slice.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        // Path-pass globals: identical layout, but GlobalParams is read from
        // `path_globals_offset` so the path pass can use different values.
        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured device errors so `draw` can surface them (and
        // eventually panic) instead of failing silently.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        let resources = WgpuResources {
            device,
            queue,
            surface,
            pipelines,
            bind_group_layouts,
            atlas_sampler,
            globals_buffer,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
        };

        Ok(Self {
            context: gpu_context,
            compositor_gpu,
            resources: Some(resources),
            surface_config,
            atlas,
            path_globals_offset,
            gamma_offset,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            max_buffer_size,
            storage_buffer_alignment,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
            device_lost: context.device_lost_flag(),
            surface_configured: true,
        })
    }
485
    /// Builds the four bind group layouts shared by every pipeline.
    ///
    /// Binding indices and visibility flags here must stay in sync with the
    /// `@group`/`@binding` declarations in `shaders.wgsl` — confirm both
    /// sides when editing either.
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        // Group 0 everywhere: GlobalParams (binding 0) + GammaParams (binding 1).
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer entry used for per-primitive instance data.
        // `min_binding_size: None` because the instance type varies by pipeline.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        // Group 1 for pipelines that only read instance data (quads, shadows,
        // underlines, path rasterization).
        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // Group 1 for pipelines that also sample a texture (sprites, paths):
        // instances (0) + texture (1) + sampler (2).
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // Group 1 for the surface pass: SurfaceParams uniform (0), two
        // textures (1, 2 — presumably separate planes of a video surface,
        // verify against the surface shader), and a sampler (3).
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
609
    /// Compiles the shaders and builds one render pipeline per primitive
    /// kind.
    ///
    /// Entry-point names must match the `@vertex`/`@fragment` functions in
    /// `shaders.wgsl` (and `shaders_subpixel.wgsl`). Rebuilt whenever the
    /// surface alpha mode changes (see `update_transparency`), since the
    /// blend state baked into each pipeline depends on it.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let base_shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(base_shader_source)),
        });

        // The subpixel shader needs the dual_source_blending WGSL extension,
        // so it is compiled as a separate module (base + subpixel source)
        // only when the adapter supports the feature.
        let subpixel_shader_source = include_str!("shaders_subpixel.wgsl");
        let subpixel_shader_module = if dual_source_blending {
            let combined = format!(
                "enable dual_source_blending;\n{base_shader_source}\n{subpixel_shader_source}"
            );
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("gpui_subpixel_shaders"),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Owned(combined)),
            }))
        } else {
            None
        };

        // Default blend for most pipelines follows the surface's alpha mode.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // All pipelines share the same shape: two bind groups (globals +
        // per-pipeline data), no vertex buffers (vertices are synthesized
        // from instance data), no depth/stencil.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32,
                               module: &wgpu::ShaderModule| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[Some(globals_layout), Some(data_layout)],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Renders into the intermediate texture, so it always uses
        // premultiplied blending and the (possibly >1) path sample count,
        // independent of the surface alpha mode.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
            &shader_module,
        );

        // Custom blend for compositing the already-premultiplied path
        // texture onto the frame (One/OneMinusSrcAlpha on color).
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
            &shader_module,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Subpixel text uses dual-source blending (Src1 factors) so the
        // fragment shader can emit a per-channel alpha mask; only built
        // when the feature (and thus the module) is available. Alpha writes
        // are masked off to preserve the destination's alpha channel.
        let subpixel_sprites = if let Some(subpixel_module) = &subpixel_shader_module {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
                subpixel_module,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
            &shader_module,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
862
863 fn create_path_intermediate(
864 device: &wgpu::Device,
865 format: wgpu::TextureFormat,
866 width: u32,
867 height: u32,
868 ) -> (wgpu::Texture, wgpu::TextureView) {
869 let texture = device.create_texture(&wgpu::TextureDescriptor {
870 label: Some("path_intermediate"),
871 size: wgpu::Extent3d {
872 width: width.max(1),
873 height: height.max(1),
874 depth_or_array_layers: 1,
875 },
876 mip_level_count: 1,
877 sample_count: 1,
878 dimension: wgpu::TextureDimension::D2,
879 format,
880 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
881 view_formats: &[],
882 });
883 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
884 (texture, view)
885 }
886
887 fn create_msaa_if_needed(
888 device: &wgpu::Device,
889 format: wgpu::TextureFormat,
890 width: u32,
891 height: u32,
892 sample_count: u32,
893 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
894 if sample_count <= 1 {
895 return None;
896 }
897 let texture = device.create_texture(&wgpu::TextureDescriptor {
898 label: Some("path_msaa"),
899 size: wgpu::Extent3d {
900 width: width.max(1),
901 height: height.max(1),
902 depth_or_array_layers: 1,
903 },
904 mip_level_count: 1,
905 sample_count,
906 dimension: wgpu::TextureDimension::D2,
907 format,
908 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
909 view_formats: &[],
910 });
911 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
912 Some((texture, view))
913 }
914
    /// Reconfigures the surface for a new drawable size.
    ///
    /// No-op when the size is unchanged. The ordering below is deliberate:
    /// wait for in-flight GPU work, destroy the old offscreen textures,
    /// reconfigure the surface, then invalidate the texture slots so the
    /// next `draw` recreates them lazily at the new size.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            // Same clamping (and warning) as in `new_internal`.
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                    Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            // `.max(1)`: zero-sized surfaces are invalid to configure.
            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            let surface_config = self.surface_config.clone();

            let resources = self.resources_mut();

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = resources.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = resources.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = resources.path_msaa_texture {
                texture.destroy();
            }

            resources
                .surface
                .configure(&resources.device, &surface_config);

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            resources.path_intermediate_texture = None;
            resources.path_intermediate_view = None;
            resources.path_msaa_texture = None;
            resources.path_msaa_view = None;
        }
    }
966
967 fn ensure_intermediate_textures(&mut self) {
968 if self.resources().path_intermediate_texture.is_some() {
969 return;
970 }
971
972 let format = self.surface_config.format;
973 let width = self.surface_config.width;
974 let height = self.surface_config.height;
975 let path_sample_count = self.rendering_params.path_sample_count;
976 let resources = self.resources_mut();
977
978 let (t, v) = Self::create_path_intermediate(&resources.device, format, width, height);
979 resources.path_intermediate_texture = Some(t);
980 resources.path_intermediate_view = Some(v);
981
982 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
983 &resources.device,
984 format,
985 width,
986 height,
987 path_sample_count,
988 )
989 .map(|(t, v)| (Some(t), Some(v)))
990 .unwrap_or((None, None));
991 resources.path_msaa_texture = path_msaa_texture;
992 resources.path_msaa_view = path_msaa_view;
993 }
994
995 pub fn update_transparency(&mut self, transparent: bool) {
996 let new_alpha_mode = if transparent {
997 self.transparent_alpha_mode
998 } else {
999 self.opaque_alpha_mode
1000 };
1001
1002 if new_alpha_mode != self.surface_config.alpha_mode {
1003 self.surface_config.alpha_mode = new_alpha_mode;
1004 let surface_config = self.surface_config.clone();
1005 let path_sample_count = self.rendering_params.path_sample_count;
1006 let dual_source_blending = self.dual_source_blending;
1007 let resources = self.resources_mut();
1008 resources
1009 .surface
1010 .configure(&resources.device, &surface_config);
1011 resources.pipelines = Self::create_pipelines(
1012 &resources.device,
1013 &resources.bind_group_layouts,
1014 surface_config.format,
1015 surface_config.alpha_mode,
1016 path_sample_count,
1017 dual_source_blending,
1018 );
1019 }
1020 }
1021
1022 #[allow(dead_code)]
1023 pub fn viewport_size(&self) -> Size<DevicePixels> {
1024 Size {
1025 width: DevicePixels(self.surface_config.width as i32),
1026 height: DevicePixels(self.surface_config.height as i32),
1027 }
1028 }
1029
    /// The sprite atlas shared with the text/image systems for glyph and
    /// image uploads.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
1033
    /// Whether the adapter supports dual-source blending, and therefore
    /// whether the subpixel text pipeline was built.
    pub fn supports_dual_source_blending(&self) -> bool {
        self.dual_source_blending
    }
1037
    /// Adapter information in gpui's `GpuSpecs` form, for diagnostics.
    pub fn gpu_specs(&self) -> GpuSpecs {
        GpuSpecs {
            // A CPU-type adapter means rendering is software emulated.
            is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
            device_name: self.adapter_info.name.clone(),
            driver_name: self.adapter_info.driver.clone(),
            driver_info: self.adapter_info.driver_info.clone(),
        }
    }
1046
    /// The device's maximum 2D texture dimension, cached at creation time;
    /// surface dimensions are clamped to this value.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
1050
    /// Renders one full [`Scene`] to the current surface texture.
    ///
    /// All primitive batches share a single instance buffer and are encoded
    /// into one render pass, in scene order. Path batches are the exception:
    /// they are first rasterized into an intermediate texture (which requires
    /// ending the main pass), then composited back in a resumed pass.
    ///
    /// If the instance buffer overflows mid-frame, the encoder is discarded,
    /// the buffer is grown, and the entire frame is re-encoded (the outer
    /// `loop`).
    pub fn draw(&mut self, scene: &Scene) {
        // Bail out early if the surface has been unconfigured (e.g. during
        // Android background/rotation transitions). Attempting to acquire
        // a texture from an unconfigured surface can block indefinitely on
        // some drivers (Adreno).
        if !self.surface_configured {
            return;
        }

        // Errors are reported asynchronously by wgpu; check what the previous
        // frame left behind. Panic only after many consecutive failures so a
        // transient driver hiccup doesn't kill the process.
        let last_error = self.last_error.lock().unwrap().take();
        if let Some(error) = last_error {
            self.failed_frame_count += 1;
            log::error!(
                "GPU error during frame (failure {} of 20): {error}",
                self.failed_frame_count
            );
            if self.failed_frame_count > 20 {
                panic!("Too many consecutive GPU errors. Last error: {error}");
            }
        } else {
            self.failed_frame_count = 0;
        }

        self.atlas.before_frame();

        // Acquire the swapchain texture, reconfiguring or skipping the frame
        // depending on the surface's health.
        let frame = match self.resources().surface.get_current_texture() {
            wgpu::CurrentSurfaceTexture::Success(frame) => frame,
            wgpu::CurrentSurfaceTexture::Suboptimal(frame) => {
                // Textures must be destroyed before the surface can be reconfigured.
                drop(frame);
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                return;
            }
            wgpu::CurrentSurfaceTexture::Lost | wgpu::CurrentSurfaceTexture::Outdated => {
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                return;
            }
            wgpu::CurrentSurfaceTexture::Timeout | wgpu::CurrentSurfaceTexture::Occluded => {
                return;
            }
            wgpu::CurrentSurfaceTexture::Validation => {
                *self.last_error.lock().unwrap() =
                    Some("Surface texture validation error".to_string());
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // The path rasterization pass always renders straight (non-premultiplied)
        // alpha into the intermediate texture.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload all three uniform blocks into distinct regions of one buffer.
        {
            let resources = self.resources();
            resources.queue.write_buffer(
                &resources.globals_buffer,
                0,
                bytemuck::bytes_of(&globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.path_globals_offset,
                bytemuck::bytes_of(&path_globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.gamma_offset,
                bytemuck::bytes_of(&gamma_params),
            );
        }

        // Encode the frame; repeats from scratch if the instance buffer overflows.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder =
                self.resources()
                    .device
                    .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                        label: Some("main_encoder"),
                    });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw helper returns false on instance-buffer overflow.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths need their own rasterization pass into the
                            // intermediate texture, so the main pass must end
                            // here and be resumed (with LoadOp::Load) after.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Nothing encoded so far can be submitted with partial data;
                // throw the encoder away, grow the buffer, and retry.
                drop(encoder);
                if self.instance_buffer_capacity >= self.max_buffer_size {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.resources()
                .queue
                .submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1295
1296 fn draw_quads(
1297 &self,
1298 quads: &[Quad],
1299 instance_offset: &mut u64,
1300 pass: &mut wgpu::RenderPass<'_>,
1301 ) -> bool {
1302 let data = unsafe { Self::instance_bytes(quads) };
1303 self.draw_instances(
1304 data,
1305 quads.len() as u32,
1306 &self.resources().pipelines.quads,
1307 instance_offset,
1308 pass,
1309 )
1310 }
1311
1312 fn draw_shadows(
1313 &self,
1314 shadows: &[Shadow],
1315 instance_offset: &mut u64,
1316 pass: &mut wgpu::RenderPass<'_>,
1317 ) -> bool {
1318 let data = unsafe { Self::instance_bytes(shadows) };
1319 self.draw_instances(
1320 data,
1321 shadows.len() as u32,
1322 &self.resources().pipelines.shadows,
1323 instance_offset,
1324 pass,
1325 )
1326 }
1327
1328 fn draw_underlines(
1329 &self,
1330 underlines: &[Underline],
1331 instance_offset: &mut u64,
1332 pass: &mut wgpu::RenderPass<'_>,
1333 ) -> bool {
1334 let data = unsafe { Self::instance_bytes(underlines) };
1335 self.draw_instances(
1336 data,
1337 underlines.len() as u32,
1338 &self.resources().pipelines.underlines,
1339 instance_offset,
1340 pass,
1341 )
1342 }
1343
1344 fn draw_monochrome_sprites(
1345 &self,
1346 sprites: &[MonochromeSprite],
1347 texture_id: AtlasTextureId,
1348 instance_offset: &mut u64,
1349 pass: &mut wgpu::RenderPass<'_>,
1350 ) -> bool {
1351 let tex_info = self.atlas.get_texture_info(texture_id);
1352 let data = unsafe { Self::instance_bytes(sprites) };
1353 self.draw_instances_with_texture(
1354 data,
1355 sprites.len() as u32,
1356 &tex_info.view,
1357 &self.resources().pipelines.mono_sprites,
1358 instance_offset,
1359 pass,
1360 )
1361 }
1362
1363 fn draw_subpixel_sprites(
1364 &self,
1365 sprites: &[SubpixelSprite],
1366 texture_id: AtlasTextureId,
1367 instance_offset: &mut u64,
1368 pass: &mut wgpu::RenderPass<'_>,
1369 ) -> bool {
1370 let tex_info = self.atlas.get_texture_info(texture_id);
1371 let data = unsafe { Self::instance_bytes(sprites) };
1372 let resources = self.resources();
1373 let pipeline = resources
1374 .pipelines
1375 .subpixel_sprites
1376 .as_ref()
1377 .unwrap_or(&resources.pipelines.mono_sprites);
1378 self.draw_instances_with_texture(
1379 data,
1380 sprites.len() as u32,
1381 &tex_info.view,
1382 pipeline,
1383 instance_offset,
1384 pass,
1385 )
1386 }
1387
1388 fn draw_polychrome_sprites(
1389 &self,
1390 sprites: &[PolychromeSprite],
1391 texture_id: AtlasTextureId,
1392 instance_offset: &mut u64,
1393 pass: &mut wgpu::RenderPass<'_>,
1394 ) -> bool {
1395 let tex_info = self.atlas.get_texture_info(texture_id);
1396 let data = unsafe { Self::instance_bytes(sprites) };
1397 self.draw_instances_with_texture(
1398 data,
1399 sprites.len() as u32,
1400 &tex_info.view,
1401 &self.resources().pipelines.poly_sprites,
1402 instance_offset,
1403 pass,
1404 )
1405 }
1406
1407 fn draw_instances(
1408 &self,
1409 data: &[u8],
1410 instance_count: u32,
1411 pipeline: &wgpu::RenderPipeline,
1412 instance_offset: &mut u64,
1413 pass: &mut wgpu::RenderPass<'_>,
1414 ) -> bool {
1415 if instance_count == 0 {
1416 return true;
1417 }
1418 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1419 return false;
1420 };
1421 let resources = self.resources();
1422 let bind_group = resources
1423 .device
1424 .create_bind_group(&wgpu::BindGroupDescriptor {
1425 label: None,
1426 layout: &resources.bind_group_layouts.instances,
1427 entries: &[wgpu::BindGroupEntry {
1428 binding: 0,
1429 resource: self.instance_binding(offset, size),
1430 }],
1431 });
1432 pass.set_pipeline(pipeline);
1433 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1434 pass.set_bind_group(1, &bind_group, &[]);
1435 pass.draw(0..4, 0..instance_count);
1436 true
1437 }
1438
1439 fn draw_instances_with_texture(
1440 &self,
1441 data: &[u8],
1442 instance_count: u32,
1443 texture_view: &wgpu::TextureView,
1444 pipeline: &wgpu::RenderPipeline,
1445 instance_offset: &mut u64,
1446 pass: &mut wgpu::RenderPass<'_>,
1447 ) -> bool {
1448 if instance_count == 0 {
1449 return true;
1450 }
1451 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1452 return false;
1453 };
1454 let resources = self.resources();
1455 let bind_group = resources
1456 .device
1457 .create_bind_group(&wgpu::BindGroupDescriptor {
1458 label: None,
1459 layout: &resources.bind_group_layouts.instances_with_texture,
1460 entries: &[
1461 wgpu::BindGroupEntry {
1462 binding: 0,
1463 resource: self.instance_binding(offset, size),
1464 },
1465 wgpu::BindGroupEntry {
1466 binding: 1,
1467 resource: wgpu::BindingResource::TextureView(texture_view),
1468 },
1469 wgpu::BindGroupEntry {
1470 binding: 2,
1471 resource: wgpu::BindingResource::Sampler(&resources.atlas_sampler),
1472 },
1473 ],
1474 });
1475 pass.set_pipeline(pipeline);
1476 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1477 pass.set_bind_group(1, &bind_group, &[]);
1478 pass.draw(0..4, 0..instance_count);
1479 true
1480 }
1481
    /// Reinterprets a slice of instance structs as raw bytes for upload to
    /// the GPU instance buffer.
    ///
    /// # Safety
    ///
    /// Every byte of `instances` — including any padding — is read and
    /// copied to the GPU verbatim, so `T` must be safe to view as plain
    /// bytes. Call sites in this file pass the renderer's `#[repr(C)]`
    /// instance types (quads, sprites, path vertices).
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        // SAFETY: the pointer and byte length describe exactly the memory
        // occupied by `instances`, and `u8` has no alignment or validity
        // requirements beyond that.
        unsafe {
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1490
1491 fn draw_paths_from_intermediate(
1492 &self,
1493 paths: &[Path<ScaledPixels>],
1494 instance_offset: &mut u64,
1495 pass: &mut wgpu::RenderPass<'_>,
1496 ) -> bool {
1497 let first_path = &paths[0];
1498 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1499 {
1500 paths
1501 .iter()
1502 .map(|p| PathSprite {
1503 bounds: p.clipped_bounds(),
1504 })
1505 .collect()
1506 } else {
1507 let mut bounds = first_path.clipped_bounds();
1508 for path in paths.iter().skip(1) {
1509 bounds = bounds.union(&path.clipped_bounds());
1510 }
1511 vec![PathSprite { bounds }]
1512 };
1513
1514 let resources = self.resources();
1515 let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
1516 return true;
1517 };
1518
1519 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1520 self.draw_instances_with_texture(
1521 sprite_data,
1522 sprites.len() as u32,
1523 path_intermediate_view,
1524 &resources.pipelines.paths,
1525 instance_offset,
1526 pass,
1527 )
1528 }
1529
1530 fn draw_paths_to_intermediate(
1531 &self,
1532 encoder: &mut wgpu::CommandEncoder,
1533 paths: &[Path<ScaledPixels>],
1534 instance_offset: &mut u64,
1535 ) -> bool {
1536 let mut vertices = Vec::new();
1537 for path in paths {
1538 let bounds = path.clipped_bounds();
1539 vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
1540 xy_position: v.xy_position,
1541 st_position: v.st_position,
1542 color: path.color,
1543 bounds,
1544 }));
1545 }
1546
1547 if vertices.is_empty() {
1548 return true;
1549 }
1550
1551 let vertex_data = unsafe { Self::instance_bytes(&vertices) };
1552 let Some((vertex_offset, vertex_size)) =
1553 self.write_to_instance_buffer(instance_offset, vertex_data)
1554 else {
1555 return false;
1556 };
1557
1558 let resources = self.resources();
1559 let data_bind_group = resources
1560 .device
1561 .create_bind_group(&wgpu::BindGroupDescriptor {
1562 label: Some("path_rasterization_bind_group"),
1563 layout: &resources.bind_group_layouts.instances,
1564 entries: &[wgpu::BindGroupEntry {
1565 binding: 0,
1566 resource: self.instance_binding(vertex_offset, vertex_size),
1567 }],
1568 });
1569
1570 let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
1571 return true;
1572 };
1573
1574 let (target_view, resolve_target) = if let Some(ref msaa_view) = resources.path_msaa_view {
1575 (msaa_view, Some(path_intermediate_view))
1576 } else {
1577 (path_intermediate_view, None)
1578 };
1579
1580 {
1581 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1582 label: Some("path_rasterization_pass"),
1583 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1584 view: target_view,
1585 resolve_target,
1586 ops: wgpu::Operations {
1587 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1588 store: wgpu::StoreOp::Store,
1589 },
1590 depth_slice: None,
1591 })],
1592 depth_stencil_attachment: None,
1593 ..Default::default()
1594 });
1595
1596 pass.set_pipeline(&resources.pipelines.path_rasterization);
1597 pass.set_bind_group(0, &resources.path_globals_bind_group, &[]);
1598 pass.set_bind_group(1, &data_bind_group, &[]);
1599 pass.draw(0..vertices.len() as u32, 0..1);
1600 }
1601
1602 true
1603 }
1604
1605 fn grow_instance_buffer(&mut self) {
1606 let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
1607 log::info!("increased instance buffer size to {}", new_capacity);
1608 let resources = self.resources_mut();
1609 resources.instance_buffer = resources.device.create_buffer(&wgpu::BufferDescriptor {
1610 label: Some("instance_buffer"),
1611 size: new_capacity,
1612 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1613 mapped_at_creation: false,
1614 });
1615 self.instance_buffer_capacity = new_capacity;
1616 }
1617
1618 fn write_to_instance_buffer(
1619 &self,
1620 instance_offset: &mut u64,
1621 data: &[u8],
1622 ) -> Option<(u64, NonZeroU64)> {
1623 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1624 let size = (data.len() as u64).max(16);
1625 if offset + size > self.instance_buffer_capacity {
1626 return None;
1627 }
1628 let resources = self.resources();
1629 resources
1630 .queue
1631 .write_buffer(&resources.instance_buffer, offset, data);
1632 *instance_offset = offset + size;
1633 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1634 }
1635
    /// Builds a binding for the slice of the shared instance buffer at
    /// `offset`..`offset + size` (as returned by `write_to_instance_buffer`).
    fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
        wgpu::BindingResource::Buffer(wgpu::BufferBinding {
            buffer: &self.resources().instance_buffer,
            offset,
            size: Some(size),
        })
    }
1643
1644 /// Mark the surface as unconfigured so rendering is skipped until a new
1645 /// surface is provided via [`replace_surface`](Self::replace_surface).
1646 ///
1647 /// This does **not** drop the renderer — the device, queue, atlas, and
1648 /// pipelines stay alive. Use this when the native window is destroyed
1649 /// (e.g. Android `TerminateWindow`) but you intend to re-create the
1650 /// surface later without losing cached atlas textures.
1651 pub fn unconfigure_surface(&mut self) {
1652 self.surface_configured = false;
1653 // Drop intermediate textures since they reference the old surface size.
1654 if let Some(res) = self.resources.as_mut() {
1655 res.path_intermediate_texture = None;
1656 res.path_intermediate_view = None;
1657 res.path_msaa_texture = None;
1658 res.path_msaa_view = None;
1659 }
1660 }
1661
1662 /// Replace the wgpu surface with a new one (e.g. after Android destroys
1663 /// and recreates the native window). Keeps the device, queue, atlas, and
1664 /// all pipelines intact so cached `AtlasTextureId`s remain valid.
1665 ///
1666 /// The `instance` **must** be the same [`wgpu::Instance`] that was used to
1667 /// create the adapter and device (i.e. from the [`WgpuContext`]). Using a
1668 /// different instance will cause a "Device does not exist" panic because
1669 /// the wgpu device is bound to its originating instance.
1670 #[cfg(not(target_family = "wasm"))]
1671 pub fn replace_surface<W: HasWindowHandle>(
1672 &mut self,
1673 window: &W,
1674 config: WgpuSurfaceConfig,
1675 instance: &wgpu::Instance,
1676 ) -> anyhow::Result<()> {
1677 let window_handle = window
1678 .window_handle()
1679 .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
1680
1681 let surface = create_surface(instance, window_handle.as_raw())?;
1682
1683 let width = (config.size.width.0 as u32).max(1);
1684 let height = (config.size.height.0 as u32).max(1);
1685
1686 let alpha_mode = if config.transparent {
1687 self.transparent_alpha_mode
1688 } else {
1689 self.opaque_alpha_mode
1690 };
1691
1692 self.surface_config.width = width;
1693 self.surface_config.height = height;
1694 self.surface_config.alpha_mode = alpha_mode;
1695 if let Some(mode) = config.preferred_present_mode {
1696 self.surface_config.present_mode = mode;
1697 }
1698
1699 {
1700 let res = self
1701 .resources
1702 .as_mut()
1703 .expect("GPU resources not available");
1704 surface.configure(&res.device, &self.surface_config);
1705 res.surface = surface;
1706
1707 // Invalidate intermediate textures — they'll be recreated lazily.
1708 res.path_intermediate_texture = None;
1709 res.path_intermediate_view = None;
1710 res.path_msaa_texture = None;
1711 res.path_msaa_view = None;
1712 }
1713
1714 self.surface_configured = true;
1715
1716 Ok(())
1717 }
1718
    /// Eagerly drops all surface-bound GPU resources so the underlying
    /// native window can be destroyed before the renderer itself is dropped.
    pub fn destroy(&mut self) {
        // Release surface-bound GPU resources eagerly so the underlying native
        // window can be destroyed before the renderer itself is dropped.
        self.resources.take();
    }
1724
    /// Returns true if the GPU device was lost and recovery is needed.
    ///
    /// The flag is set asynchronously (hence the atomic), so callers should
    /// poll it between frames and then invoke [`recover`](Self::recover).
    pub fn device_lost(&self) -> bool {
        self.device_lost.load(std::sync::atomic::Ordering::SeqCst)
    }
1729
    /// Recovers from a lost GPU device by recreating the renderer with a new context.
    ///
    /// Call this after detecting `device_lost()` returns true.
    ///
    /// This method coordinates recovery across multiple windows:
    /// - The first window to call this will recreate the shared context
    /// - Subsequent windows will adopt the already-recovered context
    #[cfg(not(target_family = "wasm"))]
    pub fn recover<W>(&mut self, window: &W) -> anyhow::Result<()>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let gpu_context = self.context.as_ref().expect("recover requires gpu_context");

        // Check if another window already recovered the context
        let needs_new_context = gpu_context
            .borrow()
            .as_ref()
            .is_none_or(|ctx| ctx.device_lost());

        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let surface = if needs_new_context {
            log::warn!("GPU device lost, recreating context...");

            // Drop old resources to release Arc<Device>/Arc<Queue> and GPU resources
            self.resources = None;
            *gpu_context.borrow_mut() = None;

            // Wait for GPU driver to stabilize (350ms copied from windows :shrug:)
            std::thread::sleep(std::time::Duration::from_millis(350));

            let instance = WgpuContext::instance(Box::new(window.clone()));
            let surface = create_surface(&instance, window_handle.as_raw())?;
            let new_context = WgpuContext::new(instance, &surface, self.compositor_gpu)?;
            *gpu_context.borrow_mut() = Some(new_context);
            surface
        } else {
            // Another window already recovered; just make a surface from the
            // existing (shared) instance.
            let ctx_ref = gpu_context.borrow();
            let instance = &ctx_ref.as_ref().unwrap().instance;
            create_surface(instance, window_handle.as_raw())?
        };

        // Rebuild the surface config from the pre-loss settings so the new
        // renderer matches the old one's size, transparency, and vsync mode.
        let config = WgpuSurfaceConfig {
            size: gpui::Size {
                width: gpui::DevicePixels(self.surface_config.width as i32),
                height: gpui::DevicePixels(self.surface_config.height as i32),
            },
            transparent: self.surface_config.alpha_mode != wgpu::CompositeAlphaMode::Opaque,
            preferred_present_mode: Some(self.surface_config.present_mode),
        };
        // Clone the Rc so the RefCell borrow below is independent of `self`,
        // which is about to be overwritten wholesale.
        let gpu_context = Rc::clone(gpu_context);
        let ctx_ref = gpu_context.borrow();
        let context = ctx_ref.as_ref().expect("context should exist");

        // Drop old per-surface resources, then point the (shared) atlas at
        // the recovered device/queue before rebuilding the renderer.
        self.resources = None;
        self.atlas
            .handle_device_lost(Arc::clone(&context.device), Arc::clone(&context.queue));

        *self = Self::new_internal(
            Some(gpu_context.clone()),
            context,
            surface,
            config,
            self.compositor_gpu,
            self.atlas.clone(),
        )?;

        log::info!("GPU recovery complete");
        Ok(())
    }
1803}
1804
1805#[cfg(not(target_family = "wasm"))]
1806fn create_surface(
1807 instance: &wgpu::Instance,
1808 raw_window_handle: raw_window_handle::RawWindowHandle,
1809) -> anyhow::Result<wgpu::Surface<'static>> {
1810 unsafe {
1811 instance
1812 .create_surface_unsafe(wgpu::SurfaceTargetUnsafe::RawHandle {
1813 // Fall back to the display handle already provided via InstanceDescriptor::display.
1814 raw_display_handle: None,
1815 raw_window_handle,
1816 })
1817 .map_err(|e| anyhow::anyhow!("{e}"))
1818 }
1819}
1820
/// Tunable rendering quality parameters resolved once at renderer startup.
struct RenderingParameters {
    // MSAA sample count used for path rasterization (4, 2, or 1).
    path_sample_count: u32,
    // Per-channel gamma correction ratios for text rendering.
    gamma_ratios: [f32; 4],
    // Contrast boost applied to grayscale-antialiased glyphs (>= 0).
    grayscale_enhanced_contrast: f32,
    // Contrast boost applied to subpixel-antialiased glyphs (>= 0).
    subpixel_enhanced_contrast: f32,
}
1827
1828impl RenderingParameters {
1829 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1830 use std::env;
1831
1832 let format_features = adapter.get_texture_format_features(surface_format);
1833 let path_sample_count = [4, 2, 1]
1834 .into_iter()
1835 .find(|&n| format_features.flags.sample_count_supported(n))
1836 .unwrap_or(1);
1837
1838 let gamma = env::var("ZED_FONTS_GAMMA")
1839 .ok()
1840 .and_then(|v| v.parse().ok())
1841 .unwrap_or(1.8_f32)
1842 .clamp(1.0, 2.2);
1843 let gamma_ratios = get_gamma_correction_ratios(gamma);
1844
1845 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1846 .ok()
1847 .and_then(|v| v.parse().ok())
1848 .unwrap_or(1.0_f32)
1849 .max(0.0);
1850
1851 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1852 .ok()
1853 .and_then(|v| v.parse().ok())
1854 .unwrap_or(0.5_f32)
1855 .max(0.0);
1856
1857 Self {
1858 path_sample_count,
1859 gamma_ratios,
1860 grayscale_enhanced_contrast,
1861 subpixel_enhanced_contrast,
1862 }
1863 }
1864}