1use crate::{CompositorGpuHint, WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use log::warn;
9#[cfg(not(target_family = "wasm"))]
10use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
11use std::cell::RefCell;
12use std::num::NonZeroU64;
13use std::rc::Rc;
14use std::sync::{Arc, Mutex};
15
/// Per-frame uniform parameters shared by all render pipelines.
///
/// Uploaded into `globals_buffer` and bound via the `globals` bind group
/// layout (group 0, binding 0). The `#[repr(C)]` layout must match the
/// corresponding struct in `shaders.wgsl` — TODO confirm against the shader.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    // Viewport size in pixels; presumably used by the vertex shaders to map
    // scene coordinates to clip space — verify against shaders.wgsl.
    viewport_size: [f32; 2],
    // Non-zero when the surface composites with premultiplied alpha.
    premultiplied_alpha: u32,
    // Explicit padding so the struct size is a multiple of 16 bytes,
    // satisfying WGSL uniform-buffer layout rules.
    pad: u32,
}
23
/// A plain-old-data rectangle (origin + size as raw `f32` pairs), suitable
/// for direct upload to GPU buffers via `bytemuck`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    // Top-left corner, in scaled pixels (see the `From<Bounds<ScaledPixels>>` impl).
    origin: [f32; 2],
    // Width and height, in scaled pixels.
    size: [f32; 2],
}
30
31impl From<Bounds<ScaledPixels>> for PodBounds {
32 fn from(bounds: Bounds<ScaledPixels>) -> Self {
33 Self {
34 origin: [bounds.origin.x.0, bounds.origin.y.0],
35 size: [bounds.size.width.0, bounds.size.height.0],
36 }
37 }
38}
39
/// Per-draw uniform data for the `surfaces` pipeline (video/external surfaces).
///
/// Layout must match the corresponding WGSL struct — TODO confirm against
/// `shaders.wgsl`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    // Where the surface is drawn, in scaled pixels.
    bounds: PodBounds,
    // Clip rectangle applied when compositing the surface.
    content_mask: PodBounds,
}
46
/// Uniform parameters for gamma-corrected text rendering, bound alongside
/// `GlobalParams` (group 0, binding 1).
///
/// Values presumably come from `get_gamma_correction_ratios` (imported above)
/// — confirm at the call site that fills this struct.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    gamma_ratios: [f32; 4],
    // Contrast enhancement applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    // Contrast enhancement applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    // Padding to a 16-byte multiple for WGSL uniform layout rules.
    _pad: [f32; 2],
}
55
/// Instance data for compositing a rasterized path from the intermediate
/// texture onto the frame: just the screen-space bounds to sample/draw.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
61
/// Vertex emitted during path rasterization into the intermediate texture.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    // Vertex position in scaled-pixel screen space.
    xy_position: Point<ScaledPixels>,
    // Parametric coordinates; presumably used for curve coverage evaluation
    // in the rasterization shader — confirm against shaders.wgsl.
    st_position: Point<f32>,
    // Fill color/background for the path.
    color: Background,
    // Bounds of the whole path, available to the shader per vertex.
    bounds: Bounds<ScaledPixels>,
}
70
/// Caller-supplied configuration for creating a renderer's window surface.
pub struct WgpuSurfaceConfig {
    /// Initial drawable size in device pixels (clamped to the adapter's
    /// maximum texture dimension at configuration time).
    pub size: Size<DevicePixels>,
    /// Whether the window wants a transparent (premultiplied-alpha) surface.
    pub transparent: bool,
    /// Preferred presentation mode. When `Some`, the renderer will use this
    /// mode if supported by the surface, falling back to `Fifo`.
    /// When `None`, defaults to `Fifo` (VSync).
    ///
    /// Mobile platforms may prefer `Mailbox` (triple-buffering) to avoid
    /// blocking in `get_current_texture()` during lifecycle transitions.
    pub preferred_present_mode: Option<wgpu::PresentMode>,
}
82
/// One render pipeline per primitive kind drawn by the renderer.
/// All are created together in `create_pipelines` and recreated when the
/// surface alpha mode changes (see `update_transparency`).
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    // Rasterizes paths into the intermediate (possibly MSAA) texture.
    path_rasterization: wgpu::RenderPipeline,
    // Composites rasterized paths from the intermediate texture to the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    // Present only when the device supports dual-source blending; otherwise
    // text falls back to `mono_sprites`.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
95
/// Bind group layouts shared by the pipelines (see `create_bind_group_layouts`).
struct WgpuBindGroupLayouts {
    // Group 0: `GlobalParams` + `GammaParams` uniform buffers.
    globals: wgpu::BindGroupLayout,
    // Group 1: a read-only storage buffer of instance data.
    instances: wgpu::BindGroupLayout,
    // Group 1 variant: instance buffer + sampled texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    // Group 1 for the `surfaces` pipeline: uniforms + two textures + sampler.
    surfaces: wgpu::BindGroupLayout,
}
102
/// Shared GPU context reference, used to coordinate device recovery across multiple windows.
///
/// `None` inside the cell means no context exists yet; the first window's
/// renderer initializes it and later windows share the same context.
pub type GpuContext = Rc<RefCell<Option<WgpuContext>>>;
105
/// GPU resources that must be dropped together during device recovery.
struct WgpuResources {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    // Linear sampler shared by all texture-sampling pipelines.
    atlas_sampler: wgpu::Sampler,
    // One buffer holding GlobalParams, path GlobalParams, and GammaParams at
    // aligned offsets (see `path_globals_offset` / `gamma_offset` on the renderer).
    globals_buffer: wgpu::Buffer,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    // Growable storage buffer for per-primitive instance data.
    instance_buffer: wgpu::Buffer,
    // Offscreen path targets; `None` until lazily created by
    // `ensure_intermediate_textures` (and invalidated on resize).
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
}
123
/// A wgpu-backed renderer for one window: owns the surface, pipelines, and
/// per-frame buffers, and tracks state needed for resize, transparency
/// changes, and GPU error/device-loss recovery.
pub struct WgpuRenderer {
    /// Shared GPU context for device recovery coordination (unused on WASM).
    #[allow(dead_code)]
    context: Option<GpuContext>,
    /// Compositor GPU hint for adapter selection (unused on WASM).
    #[allow(dead_code)]
    compositor_gpu: Option<CompositorGpuHint>,
    // `None` only while resources are torn down during device recovery;
    // accessors `resources()`/`resources_mut()` panic if taken then.
    resources: Option<WgpuResources>,
    surface_config: wgpu::SurfaceConfiguration,
    atlas: Arc<WgpuAtlas>,
    // Byte offsets into `globals_buffer` (aligned to the device's minimum
    // uniform-buffer offset alignment).
    path_globals_offset: u64,
    gamma_offset: u64,
    instance_buffer_capacity: u64,
    max_buffer_size: u64,
    storage_buffer_alignment: u64,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    // Alpha modes picked at startup so transparency can be toggled later
    // without re-querying surface capabilities.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    // Last uncaptured GPU error, written from the device error callback.
    last_error: Arc<Mutex<Option<String>>>,
    // Consecutive failed frames; `draw` panics past a threshold.
    failed_frame_count: u32,
    device_lost: std::sync::Arc<std::sync::atomic::AtomicBool>,
    // When false, `draw` bails out early (e.g. Android background transitions).
    surface_configured: bool,
}
150
151impl WgpuRenderer {
152 fn resources(&self) -> &WgpuResources {
153 self.resources
154 .as_ref()
155 .expect("GPU resources not available")
156 }
157
158 fn resources_mut(&mut self) -> &mut WgpuResources {
159 self.resources
160 .as_mut()
161 .expect("GPU resources not available")
162 }
163
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// The `gpu_context` is a shared reference that coordinates GPU context across
    /// multiple windows. The first window to create a renderer will initialize the
    /// context; subsequent windows will share it.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    #[cfg(not(target_family = "wasm"))]
    pub fn new<W>(
        gpu_context: GpuContext,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            // Fall back to the display handle already provided via InstanceDescriptor::display.
            raw_display_handle: None,
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        // NOTE: this immutable borrow of `gpu_context` ends here, before the
        // `borrow_mut` below — keep it that way to avoid a RefCell panic.
        let instance = gpu_context
            .borrow()
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(|| WgpuContext::instance(Box::new(window.clone())));

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        // Reuse the shared context if one exists (verifying it can drive this
        // surface), otherwise initialize it for all future windows.
        let mut ctx_ref = gpu_context.borrow_mut();
        let context = match ctx_ref.as_mut() {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => ctx_ref.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        let atlas = Arc::new(WgpuAtlas::from_context(context));

        Self::new_internal(
            Some(Rc::clone(&gpu_context)),
            context,
            surface,
            config,
            compositor_gpu,
            atlas,
        )
    }
231
    /// Creates a WgpuRenderer that presents into an HTML canvas (WASM only).
    ///
    /// Unlike [`Self::new`], the context is supplied directly rather than via a
    /// shared `GpuContext`, since device recovery coordination is not used on WASM.
    #[cfg(target_family = "wasm")]
    pub fn new_from_canvas(
        context: &WgpuContext,
        canvas: &web_sys::HtmlCanvasElement,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface = context
            .instance
            .create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))
            .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?;

        let atlas = Arc::new(WgpuAtlas::from_context(context));

        Self::new_internal(None, context, surface, config, None, atlas)
    }
247
    /// Shared constructor: picks a surface format and alpha modes, configures
    /// the surface, builds pipelines, layouts, and GPU buffers, and assembles
    /// the renderer.
    ///
    /// # Errors
    /// Returns an error if the surface reports no usable texture formats or
    /// alpha modes for the adapter.
    fn new_internal(
        gpu_context: Option<GpuContext>,
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
        atlas: Arc<WgpuAtlas>,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer non-sRGB 8-bit formats; fall back to any non-sRGB format,
        // then to whatever the surface offers first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first supported alpha mode from an ordered preference list,
        // falling back to the surface's first advertised mode.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Resolve both modes up front so `update_transparency` can switch
        // between them later without re-querying capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to the adapter's maximum texture dimension;
        // an oversized surface configuration would fail.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                 Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            // `.max(1)` guards against a zero-sized window.
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            // Honor the caller's preferred present mode only when supported;
            // Fifo (VSync) is guaranteed to be available.
            present_mode: config
                .preferred_present_mode
                .filter(|mode| surface_caps.present_modes.contains(mode))
                .unwrap_or(wgpu::PresentMode::Fifo),
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Pack GlobalParams, path GlobalParams, and GammaParams into one
        // buffer at offsets aligned to the device's uniform-offset alignment.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let max_buffer_size = device.limits().max_buffer_size;
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        // 2 MiB starting capacity; grown on demand up to `max_buffer_size`.
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Bind group viewing GlobalParams at offset 0 plus GammaParams.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        // Same layout, but viewing the path-specific GlobalParams slice.
        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured GPU errors; `draw` inspects this each frame and
        // panics after too many consecutive failures.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        let resources = WgpuResources {
            device,
            queue,
            surface,
            pipelines,
            bind_group_layouts,
            atlas_sampler,
            globals_buffer,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
        };

        Ok(Self {
            context: gpu_context,
            compositor_gpu,
            resources: Some(resources),
            surface_config,
            atlas,
            path_globals_offset,
            gamma_offset,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            max_buffer_size,
            storage_buffer_alignment,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
            device_lost: context.device_lost_flag(),
            surface_configured: true,
        })
    }
479
    /// Creates the four bind group layouts shared by all pipelines.
    ///
    /// - `globals`: GlobalParams (vertex+fragment) and GammaParams (fragment)
    ///   uniform buffers.
    /// - `instances`: a single read-only storage buffer of instance data.
    /// - `instances_with_texture`: the instance buffer plus a sampled 2D
    ///   texture and filtering sampler.
    /// - `surfaces`: SurfaceParams uniforms plus two sampled textures and a
    ///   sampler (for the surface-compositing pipeline).
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            // Validates the bound slice is large enough for GlobalParams.
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Shared shape for read-only instance storage buffers.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                // Two sampled textures; presumably separate planes of a video
                // surface (e.g. Y and CbCr) — confirm against fs_surface in
                // shaders.wgsl.
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
603
    /// Builds every render pipeline used by the renderer.
    ///
    /// `alpha_mode` selects the blend state used when drawing onto the surface
    /// (premultiplied vs straight alpha). `path_sample_count` controls MSAA
    /// for path rasterization only. The subpixel text pipeline is created only
    /// when dual-source blending is available on the device.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        // Diagnostic guard: verify the device actually has
        // DUAL_SOURCE_BLENDING. We have a crash report (ZED-5G1) where a
        // feature mismatch caused a wgpu-hal abort, but we haven't
        // identified the code path that produces the mismatch. This
        // guard prevents the crash and logs more evidence.
        // Remove this check once:
        // a) We find and fix the root cause, or
        // b) There are no reports of this warning appearing for some time.
        let device_has_feature = device
            .features()
            .contains(wgpu::Features::DUAL_SOURCE_BLENDING);
        if dual_source_blending && !device_has_feature {
            log::error!(
                "BUG: dual_source_blending flag is true but device does not \
                 have DUAL_SOURCE_BLENDING enabled (device features: {:?}). \
                 Falling back to mono text rendering. Please report this at \
                 https://github.com/zed-industries/zed/issues",
                device.features(),
            );
        }
        let dual_source_blending = dual_source_blending && device_has_feature;

        let base_shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(base_shader_source)),
        });

        // The subpixel shader needs the `dual_source_blending` WGSL extension,
        // so it is compiled as a separate module with the base source prepended.
        let subpixel_shader_source = include_str!("shaders_subpixel.wgsl");
        let subpixel_shader_module = if dual_source_blending {
            let combined = format!(
                "enable dual_source_blending;\n{base_shader_source}\n{subpixel_shader_source}"
            );
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("gpui_subpixel_shaders"),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Owned(combined)),
            }))
        } else {
            None
        };

        // Match the blend math to how the compositor interprets our alpha.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Common pipeline construction: no vertex buffers (vertices are
        // synthesized in the vertex shader from instance storage buffers),
        // no culling, no depth/stencil.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32,
                               module: &wgpu::ShaderModule| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[Some(globals_layout), Some(data_layout)],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Renders into the intermediate texture (always premultiplied) at the
        // MSAA sample count chosen by RenderingParameters.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
            &shader_module,
        );

        // Custom blend for compositing the premultiplied intermediate onto the
        // frame; alpha channels are accumulated additively.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
            &shader_module,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Subpixel text uses dual-source blending: the second shader output
        // (Src1) carries per-channel coverage. Alpha writes are masked off.
        let subpixel_sprites = if let Some(subpixel_module) = &subpixel_shader_module {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
                subpixel_module,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
            &shader_module,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
878
879 fn create_path_intermediate(
880 device: &wgpu::Device,
881 format: wgpu::TextureFormat,
882 width: u32,
883 height: u32,
884 ) -> (wgpu::Texture, wgpu::TextureView) {
885 let texture = device.create_texture(&wgpu::TextureDescriptor {
886 label: Some("path_intermediate"),
887 size: wgpu::Extent3d {
888 width: width.max(1),
889 height: height.max(1),
890 depth_or_array_layers: 1,
891 },
892 mip_level_count: 1,
893 sample_count: 1,
894 dimension: wgpu::TextureDimension::D2,
895 format,
896 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
897 view_formats: &[],
898 });
899 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
900 (texture, view)
901 }
902
903 fn create_msaa_if_needed(
904 device: &wgpu::Device,
905 format: wgpu::TextureFormat,
906 width: u32,
907 height: u32,
908 sample_count: u32,
909 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
910 if sample_count <= 1 {
911 return None;
912 }
913 let texture = device.create_texture(&wgpu::TextureDescriptor {
914 label: Some("path_msaa"),
915 size: wgpu::Extent3d {
916 width: width.max(1),
917 height: height.max(1),
918 depth_or_array_layers: 1,
919 },
920 mip_level_count: 1,
921 sample_count,
922 dimension: wgpu::TextureDimension::D2,
923 format,
924 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
925 view_formats: &[],
926 });
927 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
928 Some((texture, view))
929 }
930
    /// Resizes the surface to `size` (clamped to the adapter's maximum
    /// texture dimension) and invalidates the path intermediate textures so
    /// they are lazily recreated at the new size. No-op if the size is
    /// unchanged.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                     Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            // `.max(1)` guards against a zero-sized (e.g. minimized) window.
            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            let surface_config = self.surface_config.clone();

            let resources = self.resources_mut();

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = resources.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = resources.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = resources.path_msaa_texture {
                texture.destroy();
            }

            resources
                .surface
                .configure(&resources.device, &surface_config);

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            resources.path_intermediate_texture = None;
            resources.path_intermediate_view = None;
            resources.path_msaa_texture = None;
            resources.path_msaa_view = None;
        }
    }
982
983 fn ensure_intermediate_textures(&mut self) {
984 if self.resources().path_intermediate_texture.is_some() {
985 return;
986 }
987
988 let format = self.surface_config.format;
989 let width = self.surface_config.width;
990 let height = self.surface_config.height;
991 let path_sample_count = self.rendering_params.path_sample_count;
992 let resources = self.resources_mut();
993
994 let (t, v) = Self::create_path_intermediate(&resources.device, format, width, height);
995 resources.path_intermediate_texture = Some(t);
996 resources.path_intermediate_view = Some(v);
997
998 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
999 &resources.device,
1000 format,
1001 width,
1002 height,
1003 path_sample_count,
1004 )
1005 .map(|(t, v)| (Some(t), Some(v)))
1006 .unwrap_or((None, None));
1007 resources.path_msaa_texture = path_msaa_texture;
1008 resources.path_msaa_view = path_msaa_view;
1009 }
1010
    /// Switches the surface between transparent and opaque compositing.
    ///
    /// When the alpha mode actually changes, the surface is reconfigured and
    /// all pipelines are rebuilt, since the blend state baked into them
    /// depends on the alpha mode (see `create_pipelines`). No-op otherwise.
    pub fn update_transparency(&mut self, transparent: bool) {
        let new_alpha_mode = if transparent {
            self.transparent_alpha_mode
        } else {
            self.opaque_alpha_mode
        };

        if new_alpha_mode != self.surface_config.alpha_mode {
            self.surface_config.alpha_mode = new_alpha_mode;
            let surface_config = self.surface_config.clone();
            let path_sample_count = self.rendering_params.path_sample_count;
            let dual_source_blending = self.dual_source_blending;
            let resources = self.resources_mut();
            resources
                .surface
                .configure(&resources.device, &surface_config);
            resources.pipelines = Self::create_pipelines(
                &resources.device,
                &resources.bind_group_layouts,
                surface_config.format,
                surface_config.alpha_mode,
                path_sample_count,
                dual_source_blending,
            );
        }
    }
1037
1038 #[allow(dead_code)]
1039 pub fn viewport_size(&self) -> Size<DevicePixels> {
1040 Size {
1041 width: DevicePixels(self.surface_config.width as i32),
1042 height: DevicePixels(self.surface_config.height as i32),
1043 }
1044 }
1045
    /// Shared sprite atlas used for glyph and image texture storage.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
1049
    /// Whether the device supports dual-source blending (used by the
    /// dedicated subpixel-text pipeline when available).
    pub fn supports_dual_source_blending(&self) -> bool {
        self.dual_source_blending
    }
1053
    /// Reports GPU characteristics (name, driver, software-rasterizer flag)
    /// for diagnostics.
    pub fn gpu_specs(&self) -> GpuSpecs {
        GpuSpecs {
            // A `Cpu` device type indicates a software rasterizer.
            is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
            device_name: self.adapter_info.name.clone(),
            driver_name: self.adapter_info.driver.clone(),
            driver_info: self.adapter_info.driver_info.clone(),
        }
    }
1062
    /// Maximum texture dimension supported by the device, in pixels.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
1066
    /// Renders one frame of `scene` to the window surface.
    ///
    /// Handles degraded surface states (suboptimal/lost/outdated trigger a
    /// reconfigure and the frame is skipped; timeout/occluded just skip),
    /// tracks consecutive GPU errors (panicking after more than 20), and
    /// re-encodes the entire frame with a grown instance buffer when a batch
    /// overflows it.
    pub fn draw(&mut self, scene: &Scene) {
        // Bail out early if the surface has been unconfigured (e.g. during
        // Android background/rotation transitions). Attempting to acquire
        // a texture from an unconfigured surface can block indefinitely on
        // some drivers (Adreno).
        if !self.surface_configured {
            return;
        }

        // An error recorded since the previous frame (presumably stored by a
        // wgpu error callback elsewhere in this file — confirm) counts against
        // the consecutive-failure budget; a clean frame resets the counter.
        // NOTE(review): on the 21st failure this logs "failure 21 of 20"
        // before panicking.
        let last_error = self.last_error.lock().unwrap().take();
        if let Some(error) = last_error {
            self.failed_frame_count += 1;
            log::error!(
                "GPU error during frame (failure {} of 20): {error}",
                self.failed_frame_count
            );
            if self.failed_frame_count > 20 {
                panic!("Too many consecutive GPU errors. Last error: {error}");
            }
        } else {
            self.failed_frame_count = 0;
        }

        self.atlas.before_frame();

        let frame = match self.resources().surface.get_current_texture() {
            wgpu::CurrentSurfaceTexture::Success(frame) => frame,
            wgpu::CurrentSurfaceTexture::Suboptimal(frame) => {
                // Textures must be destroyed before the surface can be reconfigured.
                drop(frame);
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                return;
            }
            wgpu::CurrentSurfaceTexture::Lost | wgpu::CurrentSurfaceTexture::Outdated => {
                // Reconfigure and skip this frame; the next frame retries.
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                return;
            }
            wgpu::CurrentSurfaceTexture::Timeout | wgpu::CurrentSurfaceTexture::Occluded => {
                return;
            }
            wgpu::CurrentSurfaceTexture::Validation => {
                // Recorded so the next frame counts it as a GPU failure.
                *self.last_error.lock().unwrap() =
                    Some("Surface texture validation error".to_string());
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // Path rasterization always uses straight (non-premultiplied) alpha
        // for the intermediate texture, regardless of surface alpha mode.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload the three uniform blocks at their precomputed offsets within
        // the shared globals buffer.
        {
            let resources = self.resources();
            resources.queue.write_buffer(
                &resources.globals_buffer,
                0,
                bytemuck::bytes_of(&globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.path_globals_offset,
                bytemuck::bytes_of(&path_globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.gamma_offset,
                bytemuck::bytes_of(&gamma_params),
            );
        }

        // Each iteration attempts the whole frame. If any batch fails to fit
        // in the instance buffer, the encoder is discarded, the buffer grown,
        // and the frame re-encoded from scratch.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder =
                self.resources()
                    .device
                    .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                        label: Some("main_encoder"),
                    });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw helper returns `false` on instance-buffer overflow.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths render in two stages: rasterize into the
                            // intermediate texture (its own render pass), then
                            // composite into the frame. The main pass must end
                            // while the rasterization pass runs; it is then
                            // re-opened with `LoadOp::Load` so prior batches'
                            // output is preserved.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Abandon the encoder (nothing was submitted) so the frame can
                // be re-encoded after growing the buffer.
                drop(encoder);
                if self.instance_buffer_capacity >= self.max_buffer_size {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    // NOTE(review): this presents the frame without submitting
                    // any work — presumably to release the swapchain image
                    // rather than stall; confirm the displayed contents are
                    // acceptable in this failure path.
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.resources()
                .queue
                .submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1311
1312 fn draw_quads(
1313 &self,
1314 quads: &[Quad],
1315 instance_offset: &mut u64,
1316 pass: &mut wgpu::RenderPass<'_>,
1317 ) -> bool {
1318 let data = unsafe { Self::instance_bytes(quads) };
1319 self.draw_instances(
1320 data,
1321 quads.len() as u32,
1322 &self.resources().pipelines.quads,
1323 instance_offset,
1324 pass,
1325 )
1326 }
1327
1328 fn draw_shadows(
1329 &self,
1330 shadows: &[Shadow],
1331 instance_offset: &mut u64,
1332 pass: &mut wgpu::RenderPass<'_>,
1333 ) -> bool {
1334 let data = unsafe { Self::instance_bytes(shadows) };
1335 self.draw_instances(
1336 data,
1337 shadows.len() as u32,
1338 &self.resources().pipelines.shadows,
1339 instance_offset,
1340 pass,
1341 )
1342 }
1343
1344 fn draw_underlines(
1345 &self,
1346 underlines: &[Underline],
1347 instance_offset: &mut u64,
1348 pass: &mut wgpu::RenderPass<'_>,
1349 ) -> bool {
1350 let data = unsafe { Self::instance_bytes(underlines) };
1351 self.draw_instances(
1352 data,
1353 underlines.len() as u32,
1354 &self.resources().pipelines.underlines,
1355 instance_offset,
1356 pass,
1357 )
1358 }
1359
1360 fn draw_monochrome_sprites(
1361 &self,
1362 sprites: &[MonochromeSprite],
1363 texture_id: AtlasTextureId,
1364 instance_offset: &mut u64,
1365 pass: &mut wgpu::RenderPass<'_>,
1366 ) -> bool {
1367 let tex_info = self.atlas.get_texture_info(texture_id);
1368 let data = unsafe { Self::instance_bytes(sprites) };
1369 self.draw_instances_with_texture(
1370 data,
1371 sprites.len() as u32,
1372 &tex_info.view,
1373 &self.resources().pipelines.mono_sprites,
1374 instance_offset,
1375 pass,
1376 )
1377 }
1378
1379 fn draw_subpixel_sprites(
1380 &self,
1381 sprites: &[SubpixelSprite],
1382 texture_id: AtlasTextureId,
1383 instance_offset: &mut u64,
1384 pass: &mut wgpu::RenderPass<'_>,
1385 ) -> bool {
1386 let tex_info = self.atlas.get_texture_info(texture_id);
1387 let data = unsafe { Self::instance_bytes(sprites) };
1388 let resources = self.resources();
1389 let pipeline = resources
1390 .pipelines
1391 .subpixel_sprites
1392 .as_ref()
1393 .unwrap_or(&resources.pipelines.mono_sprites);
1394 self.draw_instances_with_texture(
1395 data,
1396 sprites.len() as u32,
1397 &tex_info.view,
1398 pipeline,
1399 instance_offset,
1400 pass,
1401 )
1402 }
1403
1404 fn draw_polychrome_sprites(
1405 &self,
1406 sprites: &[PolychromeSprite],
1407 texture_id: AtlasTextureId,
1408 instance_offset: &mut u64,
1409 pass: &mut wgpu::RenderPass<'_>,
1410 ) -> bool {
1411 let tex_info = self.atlas.get_texture_info(texture_id);
1412 let data = unsafe { Self::instance_bytes(sprites) };
1413 self.draw_instances_with_texture(
1414 data,
1415 sprites.len() as u32,
1416 &tex_info.view,
1417 &self.resources().pipelines.poly_sprites,
1418 instance_offset,
1419 pass,
1420 )
1421 }
1422
1423 fn draw_instances(
1424 &self,
1425 data: &[u8],
1426 instance_count: u32,
1427 pipeline: &wgpu::RenderPipeline,
1428 instance_offset: &mut u64,
1429 pass: &mut wgpu::RenderPass<'_>,
1430 ) -> bool {
1431 if instance_count == 0 {
1432 return true;
1433 }
1434 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1435 return false;
1436 };
1437 let resources = self.resources();
1438 let bind_group = resources
1439 .device
1440 .create_bind_group(&wgpu::BindGroupDescriptor {
1441 label: None,
1442 layout: &resources.bind_group_layouts.instances,
1443 entries: &[wgpu::BindGroupEntry {
1444 binding: 0,
1445 resource: self.instance_binding(offset, size),
1446 }],
1447 });
1448 pass.set_pipeline(pipeline);
1449 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1450 pass.set_bind_group(1, &bind_group, &[]);
1451 pass.draw(0..4, 0..instance_count);
1452 true
1453 }
1454
1455 fn draw_instances_with_texture(
1456 &self,
1457 data: &[u8],
1458 instance_count: u32,
1459 texture_view: &wgpu::TextureView,
1460 pipeline: &wgpu::RenderPipeline,
1461 instance_offset: &mut u64,
1462 pass: &mut wgpu::RenderPass<'_>,
1463 ) -> bool {
1464 if instance_count == 0 {
1465 return true;
1466 }
1467 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1468 return false;
1469 };
1470 let resources = self.resources();
1471 let bind_group = resources
1472 .device
1473 .create_bind_group(&wgpu::BindGroupDescriptor {
1474 label: None,
1475 layout: &resources.bind_group_layouts.instances_with_texture,
1476 entries: &[
1477 wgpu::BindGroupEntry {
1478 binding: 0,
1479 resource: self.instance_binding(offset, size),
1480 },
1481 wgpu::BindGroupEntry {
1482 binding: 1,
1483 resource: wgpu::BindingResource::TextureView(texture_view),
1484 },
1485 wgpu::BindGroupEntry {
1486 binding: 2,
1487 resource: wgpu::BindingResource::Sampler(&resources.atlas_sampler),
1488 },
1489 ],
1490 });
1491 pass.set_pipeline(pipeline);
1492 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1493 pass.set_bind_group(1, &bind_group, &[]);
1494 pass.draw(0..4, 0..instance_count);
1495 true
1496 }
1497
    /// Reinterprets a slice of instance structs as raw bytes for upload to
    /// the GPU instance buffer.
    ///
    /// # Safety
    ///
    /// All bytes of `T` — including any padding — are read as-is.
    /// NOTE(review): callers pass `#[repr(C)]` instance types; confirm they
    /// are padding-free (or `Pod`) so no uninitialized padding bytes are
    /// observed.
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        unsafe {
            // SAFETY: the pointer and length come from a valid slice, and
            // `size_of_val` yields exactly its byte length.
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1506
    /// Composites already-rasterized path coverage from the intermediate
    /// texture into the current main render pass.
    ///
    /// Returns `false` when the instance buffer overflowed; a missing
    /// intermediate view counts as success (nothing to composite).
    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
        pass: &mut wgpu::RenderPass<'_>,
    ) -> bool {
        let first_path = &paths[0];
        // One sprite per path when the first and last path share the same
        // `order` — NOTE(review): this presumably means the whole batch has
        // one z-order (orders appear to be monotonic within a batch; confirm
        // against Scene's batching). Otherwise a single sprite covering the
        // union of all clipped bounds is used.
        let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
        {
            paths
                .iter()
                .map(|p| PathSprite {
                    bounds: p.clipped_bounds(),
                })
                .collect()
        } else {
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            vec![PathSprite { bounds }]
        };

        let resources = self.resources();
        let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
            return true;
        };

        let sprite_data = unsafe { Self::instance_bytes(&sprites) };
        self.draw_instances_with_texture(
            sprite_data,
            sprites.len() as u32,
            path_intermediate_view,
            &resources.pipelines.paths,
            instance_offset,
            pass,
        )
    }
1545
    /// Rasterizes a batch of paths into the offscreen intermediate texture,
    /// optionally through an MSAA target that resolves into it.
    ///
    /// Returns `false` only when the instance buffer overflowed; an empty
    /// batch or missing intermediate view is treated as success.
    fn draw_paths_to_intermediate(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
    ) -> bool {
        // Flatten all paths into one vertex stream; each vertex carries its
        // path's color and clipped bounds alongside position data.
        let mut vertices = Vec::new();
        for path in paths {
            let bounds = path.clipped_bounds();
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds,
            }));
        }

        if vertices.is_empty() {
            return true;
        }

        let vertex_data = unsafe { Self::instance_bytes(&vertices) };
        let Some((vertex_offset, vertex_size)) =
            self.write_to_instance_buffer(instance_offset, vertex_data)
        else {
            return false;
        };

        let resources = self.resources();
        let data_bind_group = resources
            .device
            .create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("path_rasterization_bind_group"),
                layout: &resources.bind_group_layouts.instances,
                entries: &[wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.instance_binding(vertex_offset, vertex_size),
                }],
            });

        let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
            return true;
        };

        // Render into the MSAA texture and resolve into the intermediate
        // texture when multisampling is enabled; otherwise draw directly.
        let (target_view, resolve_target) = if let Some(ref msaa_view) = resources.path_msaa_view {
            (msaa_view, Some(path_intermediate_view))
        } else {
            (path_intermediate_view, None)
        };

        {
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("path_rasterization_pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target_view,
                    resolve_target,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                        store: wgpu::StoreOp::Store,
                    },
                    depth_slice: None,
                })],
                depth_stencil_attachment: None,
                ..Default::default()
            });

            pass.set_pipeline(&resources.pipelines.path_rasterization);
            // Paths use the non-premultiplied globals block (second slot of
            // the shared globals buffer).
            pass.set_bind_group(0, &resources.path_globals_bind_group, &[]);
            pass.set_bind_group(1, &data_bind_group, &[]);
            // One non-instanced draw covering every flattened vertex.
            pass.draw(0..vertices.len() as u32, 0..1);
        }

        true
    }
1620
1621 fn grow_instance_buffer(&mut self) {
1622 let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
1623 log::info!("increased instance buffer size to {}", new_capacity);
1624 let resources = self.resources_mut();
1625 resources.instance_buffer = resources.device.create_buffer(&wgpu::BufferDescriptor {
1626 label: Some("instance_buffer"),
1627 size: new_capacity,
1628 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1629 mapped_at_creation: false,
1630 });
1631 self.instance_buffer_capacity = new_capacity;
1632 }
1633
1634 fn write_to_instance_buffer(
1635 &self,
1636 instance_offset: &mut u64,
1637 data: &[u8],
1638 ) -> Option<(u64, NonZeroU64)> {
1639 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1640 let size = (data.len() as u64).max(16);
1641 if offset + size > self.instance_buffer_capacity {
1642 return None;
1643 }
1644 let resources = self.resources();
1645 resources
1646 .queue
1647 .write_buffer(&resources.instance_buffer, offset, data);
1648 *instance_offset = offset + size;
1649 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1650 }
1651
    /// Builds a buffer binding for a sub-range of the shared instance buffer,
    /// as reserved by [`write_to_instance_buffer`](Self::write_to_instance_buffer).
    fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
        wgpu::BindingResource::Buffer(wgpu::BufferBinding {
            buffer: &self.resources().instance_buffer,
            offset,
            size: Some(size),
        })
    }
1659
1660 /// Mark the surface as unconfigured so rendering is skipped until a new
1661 /// surface is provided via [`replace_surface`](Self::replace_surface).
1662 ///
1663 /// This does **not** drop the renderer — the device, queue, atlas, and
1664 /// pipelines stay alive. Use this when the native window is destroyed
1665 /// (e.g. Android `TerminateWindow`) but you intend to re-create the
1666 /// surface later without losing cached atlas textures.
1667 pub fn unconfigure_surface(&mut self) {
1668 self.surface_configured = false;
1669 // Drop intermediate textures since they reference the old surface size.
1670 if let Some(res) = self.resources.as_mut() {
1671 res.path_intermediate_texture = None;
1672 res.path_intermediate_view = None;
1673 res.path_msaa_texture = None;
1674 res.path_msaa_view = None;
1675 }
1676 }
1677
1678 /// Replace the wgpu surface with a new one (e.g. after Android destroys
1679 /// and recreates the native window). Keeps the device, queue, atlas, and
1680 /// all pipelines intact so cached `AtlasTextureId`s remain valid.
1681 ///
1682 /// The `instance` **must** be the same [`wgpu::Instance`] that was used to
1683 /// create the adapter and device (i.e. from the [`WgpuContext`]). Using a
1684 /// different instance will cause a "Device does not exist" panic because
1685 /// the wgpu device is bound to its originating instance.
1686 #[cfg(not(target_family = "wasm"))]
1687 pub fn replace_surface<W: HasWindowHandle>(
1688 &mut self,
1689 window: &W,
1690 config: WgpuSurfaceConfig,
1691 instance: &wgpu::Instance,
1692 ) -> anyhow::Result<()> {
1693 let window_handle = window
1694 .window_handle()
1695 .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
1696
1697 let surface = create_surface(instance, window_handle.as_raw())?;
1698
1699 let width = (config.size.width.0 as u32).max(1);
1700 let height = (config.size.height.0 as u32).max(1);
1701
1702 let alpha_mode = if config.transparent {
1703 self.transparent_alpha_mode
1704 } else {
1705 self.opaque_alpha_mode
1706 };
1707
1708 self.surface_config.width = width;
1709 self.surface_config.height = height;
1710 self.surface_config.alpha_mode = alpha_mode;
1711 if let Some(mode) = config.preferred_present_mode {
1712 self.surface_config.present_mode = mode;
1713 }
1714
1715 {
1716 let res = self
1717 .resources
1718 .as_mut()
1719 .expect("GPU resources not available");
1720 surface.configure(&res.device, &self.surface_config);
1721 res.surface = surface;
1722
1723 // Invalidate intermediate textures — they'll be recreated lazily.
1724 res.path_intermediate_texture = None;
1725 res.path_intermediate_view = None;
1726 res.path_msaa_texture = None;
1727 res.path_msaa_view = None;
1728 }
1729
1730 self.surface_configured = true;
1731
1732 Ok(())
1733 }
1734
    /// Eagerly drops all GPU resources (surface, buffers, pipelines) so the
    /// underlying native window can be destroyed before the renderer itself
    /// is dropped.
    pub fn destroy(&mut self) {
        // Release surface-bound GPU resources eagerly so the underlying native
        // window can be destroyed before the renderer itself is dropped.
        self.resources.take();
    }
1740
    /// Returns true if the GPU device was lost and recovery is needed.
    ///
    /// Read from an atomic flag — presumably set by a device-lost callback
    /// registered at renderer construction (not visible here; confirm).
    pub fn device_lost(&self) -> bool {
        self.device_lost.load(std::sync::atomic::Ordering::SeqCst)
    }
1745
    /// Recovers from a lost GPU device by recreating the renderer with a new context.
    ///
    /// Call this after detecting `device_lost()` returns true.
    ///
    /// This method coordinates recovery across multiple windows:
    /// - The first window to call this will recreate the shared context
    /// - Subsequent windows will adopt the already-recovered context
    #[cfg(not(target_family = "wasm"))]
    pub fn recover<W>(&mut self, window: &W) -> anyhow::Result<()>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let gpu_context = self.context.as_ref().expect("recover requires gpu_context");

        // Check if another window already recovered the context
        let needs_new_context = gpu_context
            .borrow()
            .as_ref()
            .is_none_or(|ctx| ctx.device_lost());

        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let surface = if needs_new_context {
            log::warn!("GPU device lost, recreating context...");

            // Drop old resources to release Arc<Device>/Arc<Queue> and GPU resources
            self.resources = None;
            *gpu_context.borrow_mut() = None;

            // Wait for GPU driver to stabilize (350ms copied from windows :shrug:)
            std::thread::sleep(std::time::Duration::from_millis(350));

            let instance = WgpuContext::instance(Box::new(window.clone()));
            let surface = create_surface(&instance, window_handle.as_raw())?;
            let new_context = WgpuContext::new(instance, &surface, self.compositor_gpu)?;
            *gpu_context.borrow_mut() = Some(new_context);
            surface
        } else {
            // Another window already rebuilt the context; just make a surface
            // on the existing (recovered) instance.
            let ctx_ref = gpu_context.borrow();
            let instance = &ctx_ref.as_ref().unwrap().instance;
            create_surface(instance, window_handle.as_raw())?
        };

        // Rebuild the renderer on the recovered context, preserving the
        // previous surface geometry and presentation settings.
        let config = WgpuSurfaceConfig {
            size: gpui::Size {
                width: gpui::DevicePixels(self.surface_config.width as i32),
                height: gpui::DevicePixels(self.surface_config.height as i32),
            },
            transparent: self.surface_config.alpha_mode != wgpu::CompositeAlphaMode::Opaque,
            preferred_present_mode: Some(self.surface_config.present_mode),
        };
        let gpu_context = Rc::clone(gpu_context);
        let ctx_ref = gpu_context.borrow();
        let context = ctx_ref.as_ref().expect("context should exist");

        self.resources = None;
        // Let the atlas react to the device loss — presumably re-creating its
        // GPU-side textures on the new device while keeping ids valid; confirm
        // in `WgpuAtlas::handle_device_lost`.
        self.atlas.handle_device_lost(context);

        // Replace self wholesale; the atlas is shared (Arc) so cached
        // `AtlasTextureId`s survive the swap.
        *self = Self::new_internal(
            Some(gpu_context.clone()),
            context,
            surface,
            config,
            self.compositor_gpu,
            self.atlas.clone(),
        )?;

        log::info!("GPU recovery complete");
        Ok(())
    }
1818}
1819
/// Creates a wgpu surface from a raw window handle on the given instance.
///
/// The display handle is deliberately omitted (`None`): wgpu falls back to
/// the display handle already supplied when the instance was created.
#[cfg(not(target_family = "wasm"))]
fn create_surface(
    instance: &wgpu::Instance,
    raw_window_handle: raw_window_handle::RawWindowHandle,
) -> anyhow::Result<wgpu::Surface<'static>> {
    // SAFETY: `create_surface_unsafe` requires the raw window handle to refer
    // to a live window for the surface's lifetime; callers in this file pass
    // handles from currently-alive windows.
    unsafe {
        instance
            .create_surface_unsafe(wgpu::SurfaceTargetUnsafe::RawHandle {
                // Fall back to the display handle already provided via InstanceDescriptor::display.
                raw_display_handle: None,
                raw_window_handle,
            })
            .map_err(|e| anyhow::anyhow!("{e}"))
    }
}
1835
/// Tunable rendering parameters resolved once when the renderer is created.
struct RenderingParameters {
    // MSAA sample count used for path rasterization (4, 2, or 1).
    path_sample_count: u32,
    // Precomputed gamma-correction curve coefficients for text rendering.
    gamma_ratios: [f32; 4],
    // Extra contrast for grayscale-antialiased glyphs (clamped to >= 0).
    grayscale_enhanced_contrast: f32,
    // Extra contrast for subpixel-antialiased glyphs (clamped to >= 0).
    subpixel_enhanced_contrast: f32,
}
1842
1843impl RenderingParameters {
1844 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1845 use std::env;
1846
1847 let format_features = adapter.get_texture_format_features(surface_format);
1848 let path_sample_count = [4, 2, 1]
1849 .into_iter()
1850 .find(|&n| format_features.flags.sample_count_supported(n))
1851 .unwrap_or(1);
1852
1853 let gamma = env::var("ZED_FONTS_GAMMA")
1854 .ok()
1855 .and_then(|v| v.parse().ok())
1856 .unwrap_or(1.8_f32)
1857 .clamp(1.0, 2.2);
1858 let gamma_ratios = get_gamma_correction_ratios(gamma);
1859
1860 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1861 .ok()
1862 .and_then(|v| v.parse().ok())
1863 .unwrap_or(1.0_f32)
1864 .max(0.0);
1865
1866 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1867 .ok()
1868 .and_then(|v| v.parse().ok())
1869 .unwrap_or(0.5_f32)
1870 .max(0.0);
1871
1872 Self {
1873 path_sample_count,
1874 gamma_ratios,
1875 grayscale_enhanced_contrast,
1876 subpixel_enhanced_contrast,
1877 }
1878 }
1879}