1use crate::{CompositorGpuHint, WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use log::warn;
9#[cfg(not(target_family = "wasm"))]
10use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
11use std::cell::RefCell;
12use std::num::NonZeroU64;
13use std::rc::Rc;
14use std::sync::{Arc, Mutex};
15
/// Per-frame uniform parameters shared by the shaders.
///
/// Uploaded into `globals_buffer` at offset 0 and bound at binding 0 of the
/// globals bind group (see `new_internal`).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    // Viewport size in pixels; presumably used by shaders to map pixel
    // coordinates to clip space — confirm against shaders.wgsl.
    viewport_size: [f32; 2],
    // Non-zero when the surface composites with premultiplied alpha.
    premultiplied_alpha: u32,
    // Explicit padding so the struct size is a multiple of 16 bytes.
    pad: u32,
}
23
/// Plain-old-data mirror of `Bounds<ScaledPixels>`, suitable for direct GPU
/// upload (see the `From<Bounds<ScaledPixels>>` impl below).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    // Top-left corner as `[x, y]`.
    origin: [f32; 2],
    // Extent as `[width, height]`.
    size: [f32; 2],
}
30
31impl From<Bounds<ScaledPixels>> for PodBounds {
32 fn from(bounds: Bounds<ScaledPixels>) -> Self {
33 Self {
34 origin: [bounds.origin.x.0, bounds.origin.y.0],
35 size: [bounds.size.width.0, bounds.size.height.0],
36 }
37 }
38}
39
/// Uniform parameters for the surface pipeline (binding 0 of the surfaces
/// bind group layout). The `surfaces` pipeline is currently marked
/// `#[allow(dead_code)]`, so this may be unused at runtime.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    // Destination rectangle of the surface.
    bounds: PodBounds,
    // Clipping rectangle applied to the draw.
    content_mask: PodBounds,
}
46
/// Text gamma-correction parameters, uploaded into `globals_buffer` at
/// `gamma_offset` and bound at binding 1 of the globals bind group.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    // Presumably produced by `get_gamma_correction_ratios` (imported above);
    // exact semantics live in gpui — confirm against the shader.
    gamma_ratios: [f32; 4],
    grayscale_enhanced_contrast: f32,
    subpixel_enhanced_contrast: f32,
    // Padding to a 16-byte multiple for uniform-buffer layout rules.
    _pad: [f32; 2],
}
55
/// Per-instance data for compositing a rasterized path onto the surface.
/// NOTE(review): usage is outside this chunk — presumably one instance per
/// path quad in the `paths` pipeline; confirm against the draw code.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    // Screen-space bounds of the path, in scaled pixels.
    bounds: Bounds<ScaledPixels>,
}
61
/// Vertex data for the path rasterization pass (`vs_path_rasterization`).
/// NOTE(review): field semantics inferred from names; confirm against
/// shaders.wgsl.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    // Vertex position in scaled pixels.
    xy_position: Point<ScaledPixels>,
    // Parametric (s, t) coordinates within the path.
    st_position: Point<f32>,
    // Fill for the path.
    color: Background,
    // Bounds of the whole path this vertex belongs to.
    bounds: Bounds<ScaledPixels>,
}
70
/// Caller-supplied configuration for creating a renderer's swapchain surface.
pub struct WgpuSurfaceConfig {
    /// Initial drawable size in device pixels (clamped to the device's
    /// maximum texture dimension during initialization).
    pub size: Size<DevicePixels>,
    /// When true, a premultiplied/inherit composite alpha mode is preferred
    /// over an opaque one.
    pub transparent: bool,
    /// Preferred presentation mode. When `Some`, the renderer will use this
    /// mode if supported by the surface, falling back to `Fifo`.
    /// When `None`, defaults to `Fifo` (VSync).
    ///
    /// Mobile platforms may prefer `Mailbox` (triple-buffering) to avoid
    /// blocking in `get_current_texture()` during lifecycle transitions.
    pub preferred_present_mode: Option<wgpu::PresentMode>,
}
82
/// One render pipeline per primitive kind; all built by `create_pipelines`
/// and rebuilt whenever the surface alpha mode changes.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    // Renders path geometry into the intermediate (optionally MSAA) texture.
    path_rasterization: wgpu::RenderPipeline,
    // Composites the rasterized paths onto the surface.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    // Only present when dual-source blending is available; used for
    // subpixel-antialiased text.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
95
/// Bind group layouts shared by the pipelines; see `create_bind_group_layouts`
/// for the exact bindings each one declares.
struct WgpuBindGroupLayouts {
    // Uniforms: GlobalParams (binding 0) + GammaParams (binding 1).
    globals: wgpu::BindGroupLayout,
    // Read-only storage buffer of instance data (binding 0).
    instances: wgpu::BindGroupLayout,
    // Instance storage buffer + sampled texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    // SurfaceParams uniform + two textures + sampler, for the surface pipeline.
    surfaces: wgpu::BindGroupLayout,
}
102
/// Shared GPU context reference, used to coordinate device recovery across multiple windows.
///
/// Holds `None` until the first window initializes the context; windows
/// created afterwards reuse the same `WgpuContext` (see `WgpuRenderer::new`).
pub type GpuContext = Rc<RefCell<Option<WgpuContext>>>;
105
/// GPU resources that must be dropped together during device recovery.
struct WgpuResources {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    // Linear-filtering sampler used with atlas textures.
    atlas_sampler: wgpu::Sampler,
    // Single buffer holding GlobalParams (offset 0), a second GlobalParams at
    // `path_globals_offset`, and GammaParams at `gamma_offset`.
    globals_buffer: wgpu::Buffer,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    // Growable storage buffer for per-primitive instance data.
    instance_buffer: wgpu::Buffer,
    // Lazily created by `ensure_intermediate_textures`; `None` after a resize
    // or device error invalidates them.
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
}
123
124impl WgpuResources {
125 fn invalidate_intermediate_textures(&mut self) {
126 self.path_intermediate_texture = None;
127 self.path_intermediate_view = None;
128 self.path_msaa_texture = None;
129 self.path_msaa_view = None;
130 }
131}
132
/// A gpui renderer backed by wgpu: owns the swapchain surface, pipelines, and
/// per-frame buffers for a single window.
pub struct WgpuRenderer {
    /// Shared GPU context for device recovery coordination (unused on WASM).
    #[allow(dead_code)]
    context: Option<GpuContext>,
    /// Compositor GPU hint for adapter selection (unused on WASM).
    #[allow(dead_code)]
    compositor_gpu: Option<CompositorGpuHint>,
    // `Some` after successful initialization; the `resources`/`resources_mut`
    // accessors panic when it is `None` (presumably only during device
    // recovery — confirm against the recovery path).
    resources: Option<WgpuResources>,
    // Current surface configuration; width/height are clamped to
    // `max_texture_size` and kept >= 1.
    surface_config: wgpu::SurfaceConfiguration,
    atlas: Arc<WgpuAtlas>,
    // Byte offsets of the path GlobalParams and GammaParams within
    // `globals_buffer`, aligned to the uniform-buffer offset alignment.
    path_globals_offset: u64,
    gamma_offset: u64,
    // Current size of `instance_buffer`, bounded by `max_buffer_size`.
    instance_buffer_capacity: u64,
    max_buffer_size: u64,
    storage_buffer_alignment: u64,
    rendering_params: RenderingParameters,
    // Whether subpixel text rendering via dual-source blending is available.
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    // Alpha modes chosen at init for transparent vs. opaque windows; used by
    // `update_transparency` to reconfigure without re-querying capabilities.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    // Most recent uncaptured device error, recorded by the callback installed
    // in `new_internal` and drained at the start of `draw`.
    last_error: Arc<Mutex<Option<String>>>,
    // Count of consecutive frames that reported a GPU error.
    failed_frame_count: u32,
    device_lost: std::sync::Arc<std::sync::atomic::AtomicBool>,
    // When false, `draw` bails out early instead of acquiring a texture.
    surface_configured: bool,
    needs_redraw: bool,
}
160
161impl WgpuRenderer {
162 fn resources(&self) -> &WgpuResources {
163 self.resources
164 .as_ref()
165 .expect("GPU resources not available")
166 }
167
168 fn resources_mut(&mut self) -> &mut WgpuResources {
169 self.resources
170 .as_mut()
171 .expect("GPU resources not available")
172 }
173
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// The `gpu_context` is a shared reference that coordinates GPU context across
    /// multiple windows. The first window to create a renderer will initialize the
    /// context; subsequent windows will share it.
    ///
    /// # Errors
    /// Fails if the window handle cannot be obtained, the surface cannot be
    /// created, or an existing shared context is incompatible with the surface.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    #[cfg(not(target_family = "wasm"))]
    pub fn new<W>(
        gpu_context: GpuContext,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            // Fall back to the display handle already provided via InstanceDescriptor::display.
            raw_display_handle: None,
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        // NOTE: the shared context is only *read* here; the mutable borrow for
        // initialization happens below, after surface creation.
        let instance = gpu_context
            .borrow()
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(|| WgpuContext::instance(Box::new(window.clone())));

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        // First window initializes the shared context; later windows must be
        // compatible with the already-selected adapter.
        let mut ctx_ref = gpu_context.borrow_mut();
        let context = match ctx_ref.as_mut() {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => ctx_ref.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        let atlas = Arc::new(WgpuAtlas::from_context(context));

        Self::new_internal(
            Some(Rc::clone(&gpu_context)),
            context,
            surface,
            config,
            compositor_gpu,
            atlas,
        )
    }
241
    /// Creates a renderer that presents into an HTML canvas (WASM builds).
    ///
    /// Unlike the native path, the context is supplied directly and there is
    /// no shared-context coordination or compositor GPU hint.
    #[cfg(target_family = "wasm")]
    pub fn new_from_canvas(
        context: &WgpuContext,
        canvas: &web_sys::HtmlCanvasElement,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface = context
            .instance
            .create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))
            .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?;

        let atlas = Arc::new(WgpuAtlas::from_context(context));

        Self::new_internal(None, context, surface, config, None, atlas)
    }
257
    /// Shared initialization: picks a surface format and alpha modes,
    /// configures the surface, builds bind group layouts/pipelines, and
    /// allocates the uniform and instance buffers.
    fn new_internal(
        gpu_context: Option<GpuContext>,
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
        atlas: Arc<WgpuAtlas>,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer well-known non-sRGB 8-bit formats; fall back to any non-sRGB
        // format, then to whatever the surface offers first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first supported alpha mode from an ordered preference
        // list, falling back to the surface's first reported mode.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Resolve both modes up front so `update_transparency` can switch
        // later without re-querying surface capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to the device's maximum texture dimension.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            // wgpu rejects zero-sized surfaces; keep both dimensions >= 1.
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: config
                .preferred_present_mode
                .filter(|mode| surface_caps.present_modes.contains(mode))
                .unwrap_or(wgpu::PresentMode::Fifo),
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Lay out one buffer holding GlobalParams (offset 0), a second
        // GlobalParams for the path pass, and GammaParams — each aligned to
        // the device's minimum uniform-buffer offset alignment.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let max_buffer_size = device.limits().max_buffer_size;
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        // 2 MiB starting capacity; grows later as needed (bounded by max_buffer_size).
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Two bind groups over the same buffer: one reads GlobalParams at
        // offset 0, the other at `path_globals_offset`. Both share GammaParams.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured device errors; `draw` drains this each frame to
        // count consecutive failures.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        let resources = WgpuResources {
            device,
            queue,
            surface,
            pipelines,
            bind_group_layouts,
            atlas_sampler,
            globals_buffer,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
        };

        Ok(Self {
            context: gpu_context,
            compositor_gpu,
            resources: Some(resources),
            surface_config,
            atlas,
            path_globals_offset,
            gamma_offset,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            max_buffer_size,
            storage_buffer_alignment,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
            device_lost: context.device_lost_flag(),
            surface_configured: true,
            needs_redraw: false,
        })
    }
490
    /// Creates the four bind group layouts used by all pipelines:
    /// `globals` (uniforms), `instances` (storage buffer), `instances_with_texture`
    /// (storage buffer + texture + sampler), and `surfaces` (uniform + two
    /// textures + sampler).
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        // Binding 0: GlobalParams (vertex + fragment); binding 1: GammaParams
        // (fragment only).
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer entry reused by both instance layouts.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // Instances plus a filterable 2D texture (binding 1) and its sampler
        // (binding 2) — used by sprite and path-compositing pipelines.
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // SurfaceParams uniform (binding 0), two sampled textures (bindings
        // 1-2) and a sampler (binding 3). Presumably the two textures are
        // video planes — confirm against the surface shader.
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
614
    /// Builds every render pipeline used by the renderer.
    ///
    /// `alpha_mode` selects the blend state for pipelines that draw directly
    /// to the surface; `path_sample_count` is the MSAA sample count of the
    /// path rasterization pass; `dual_source_blending` gates the optional
    /// subpixel text pipeline (requires `Features::DUAL_SOURCE_BLENDING`).
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        // Diagnostic guard: verify the device actually has
        // DUAL_SOURCE_BLENDING. We have a crash report (ZED-5G1) where a
        // feature mismatch caused a wgpu-hal abort, but we haven't
        // identified the code path that produces the mismatch. This
        // guard prevents the crash and logs more evidence.
        // Remove this check once:
        // a) We find and fix the root cause, or
        // b) There are no reports of this warning appearing for some time.
        let device_has_feature = device
            .features()
            .contains(wgpu::Features::DUAL_SOURCE_BLENDING);
        if dual_source_blending && !device_has_feature {
            log::error!(
                "BUG: dual_source_blending flag is true but device does not \
                have DUAL_SOURCE_BLENDING enabled (device features: {:?}). \
                Falling back to mono text rendering. Please report this at \
                https://github.com/zed-industries/zed/issues",
                device.features(),
            );
        }
        let dual_source_blending = dual_source_blending && device_has_feature;

        let base_shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(base_shader_source)),
        });

        // The subpixel shader needs the `dual_source_blending` WGSL extension,
        // so it is compiled as a separate module (base + subpixel sources
        // concatenated) only when the feature is available.
        let subpixel_shader_source = include_str!("shaders_subpixel.wgsl");
        let subpixel_shader_module = if dual_source_blending {
            let combined = format!(
                "enable dual_source_blending;\n{base_shader_source}\n{subpixel_shader_source}"
            );
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("gpui_subpixel_shaders"),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Owned(combined)),
            }))
        } else {
            None
        };

        // Surface-targeting pipelines must match the compositor's alpha mode.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Shared builder: every pipeline differs only in entry points,
        // layouts, topology, color targets, sample count, and shader module.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32,
                               module: &wgpu::ShaderModule| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[Some(globals_layout), Some(data_layout)],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module,
                    entry_point: Some(vs_entry),
                    // Instance data comes from storage buffers, not vertex buffers.
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Renders into the intermediate texture (always premultiplied),
        // optionally multisampled.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
            &shader_module,
        );

        // Blend for compositing the premultiplied intermediate onto the
        // surface: color uses (One, OneMinusSrcAlpha); alpha accumulates.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
            &shader_module,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Subpixel text: dual-source blend (Src1/OneMinusSrc1) with the alpha
        // channel masked out of the write.
        let subpixel_sprites = if let Some(subpixel_module) = &subpixel_shader_module {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
                subpixel_module,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
            &shader_module,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
889
890 fn create_path_intermediate(
891 device: &wgpu::Device,
892 format: wgpu::TextureFormat,
893 width: u32,
894 height: u32,
895 ) -> (wgpu::Texture, wgpu::TextureView) {
896 let texture = device.create_texture(&wgpu::TextureDescriptor {
897 label: Some("path_intermediate"),
898 size: wgpu::Extent3d {
899 width: width.max(1),
900 height: height.max(1),
901 depth_or_array_layers: 1,
902 },
903 mip_level_count: 1,
904 sample_count: 1,
905 dimension: wgpu::TextureDimension::D2,
906 format,
907 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
908 view_formats: &[],
909 });
910 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
911 (texture, view)
912 }
913
914 fn create_msaa_if_needed(
915 device: &wgpu::Device,
916 format: wgpu::TextureFormat,
917 width: u32,
918 height: u32,
919 sample_count: u32,
920 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
921 if sample_count <= 1 {
922 return None;
923 }
924 let texture = device.create_texture(&wgpu::TextureDescriptor {
925 label: Some("path_msaa"),
926 size: wgpu::Extent3d {
927 width: width.max(1),
928 height: height.max(1),
929 depth_or_array_layers: 1,
930 },
931 mip_level_count: 1,
932 sample_count,
933 dimension: wgpu::TextureDimension::D2,
934 format,
935 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
936 view_formats: &[],
937 });
938 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
939 Some((texture, view))
940 }
941
    /// Resizes the swapchain to `size` (clamped to the device's maximum
    /// texture dimension). No-op when the size is unchanged.
    ///
    /// Waits for in-flight GPU work, destroys the old path intermediate
    /// textures, reconfigures the surface, then invalidates the intermediates
    /// so the next draw recreates them at the new size.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                    Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            // wgpu rejects zero-sized surfaces; keep both dimensions >= 1.
            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            let surface_config = self.surface_config.clone();

            let resources = self.resources_mut();

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = resources.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = resources.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = resources.path_msaa_texture {
                texture.destroy();
            }

            resources
                .surface
                .configure(&resources.device, &surface_config);

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            resources.invalidate_intermediate_textures();
        }
    }
990
991 fn ensure_intermediate_textures(&mut self) {
992 if self.resources().path_intermediate_texture.is_some() {
993 return;
994 }
995
996 let format = self.surface_config.format;
997 let width = self.surface_config.width;
998 let height = self.surface_config.height;
999 let path_sample_count = self.rendering_params.path_sample_count;
1000 let resources = self.resources_mut();
1001
1002 let (t, v) = Self::create_path_intermediate(&resources.device, format, width, height);
1003 resources.path_intermediate_texture = Some(t);
1004 resources.path_intermediate_view = Some(v);
1005
1006 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
1007 &resources.device,
1008 format,
1009 width,
1010 height,
1011 path_sample_count,
1012 )
1013 .map(|(t, v)| (Some(t), Some(v)))
1014 .unwrap_or((None, None));
1015 resources.path_msaa_texture = path_msaa_texture;
1016 resources.path_msaa_view = path_msaa_view;
1017 }
1018
    /// Switches the surface between the opaque and transparent composite
    /// alpha modes resolved at initialization.
    ///
    /// When the mode actually changes, reconfigures the surface and rebuilds
    /// every pipeline, because the surface blend state depends on the alpha
    /// mode (see `create_pipelines`).
    pub fn update_transparency(&mut self, transparent: bool) {
        let new_alpha_mode = if transparent {
            self.transparent_alpha_mode
        } else {
            self.opaque_alpha_mode
        };

        if new_alpha_mode != self.surface_config.alpha_mode {
            self.surface_config.alpha_mode = new_alpha_mode;
            // Copy out values first to avoid borrowing `self` twice.
            let surface_config = self.surface_config.clone();
            let path_sample_count = self.rendering_params.path_sample_count;
            let dual_source_blending = self.dual_source_blending;
            let resources = self.resources_mut();
            resources
                .surface
                .configure(&resources.device, &surface_config);
            resources.pipelines = Self::create_pipelines(
                &resources.device,
                &resources.bind_group_layouts,
                surface_config.format,
                surface_config.alpha_mode,
                path_sample_count,
                dual_source_blending,
            );
        }
    }
1045
1046 #[allow(dead_code)]
1047 pub fn viewport_size(&self) -> Size<DevicePixels> {
1048 Size {
1049 width: DevicePixels(self.surface_config.width as i32),
1050 height: DevicePixels(self.surface_config.height as i32),
1051 }
1052 }
1053
    /// The shared sprite atlas backing glyph/image texture caching.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
1057
    /// Whether the device supports dual-source blending (determines whether a
    /// dedicated subpixel-text pipeline exists — see `draw_subpixel_sprites`).
    pub fn supports_dual_source_blending(&self) -> bool {
        self.dual_source_blending
    }
1061
1062 pub fn gpu_specs(&self) -> GpuSpecs {
1063 GpuSpecs {
1064 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
1065 device_name: self.adapter_info.name.clone(),
1066 driver_name: self.adapter_info.driver.clone(),
1067 driver_info: self.adapter_info.driver_info.clone(),
1068 }
1069 }
1070
    /// Maximum 2D texture dimension supported by the device, in texels.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
1074
1075 pub fn draw(&mut self, scene: &Scene) {
1076 // Bail out early if the surface has been unconfigured (e.g. during
1077 // Android background/rotation transitions). Attempting to acquire
1078 // a texture from an unconfigured surface can block indefinitely on
1079 // some drivers (Adreno).
1080 if !self.surface_configured {
1081 return;
1082 }
1083
1084 let last_error = self.last_error.lock().unwrap().take();
1085 if let Some(error) = last_error {
1086 self.failed_frame_count += 1;
1087 log::error!(
1088 "GPU error during frame (failure {} of 10): {error}",
1089 self.failed_frame_count
1090 );
1091
1092 // TBD. Does retrying more actually help?
1093 if self.failed_frame_count > 5 {
1094 if let Some(res) = self.resources.as_mut() {
1095 res.invalidate_intermediate_textures();
1096 }
1097 self.atlas.clear();
1098 self.needs_redraw = true;
1099 return;
1100 } else if self.failed_frame_count > 10 {
1101 panic!("Too many consecutive GPU errors. Last error: {error}");
1102 }
1103 } else {
1104 self.failed_frame_count = 0;
1105 }
1106
1107 self.atlas.before_frame();
1108
1109 let frame = match self.resources().surface.get_current_texture() {
1110 wgpu::CurrentSurfaceTexture::Success(frame) => frame,
1111 wgpu::CurrentSurfaceTexture::Suboptimal(frame) => {
1112 // Textures must be destroyed before the surface can be reconfigured.
1113 drop(frame);
1114 let surface_config = self.surface_config.clone();
1115 let resources = self.resources_mut();
1116 resources
1117 .surface
1118 .configure(&resources.device, &surface_config);
1119 return;
1120 }
1121 wgpu::CurrentSurfaceTexture::Lost | wgpu::CurrentSurfaceTexture::Outdated => {
1122 let surface_config = self.surface_config.clone();
1123 let resources = self.resources_mut();
1124 resources
1125 .surface
1126 .configure(&resources.device, &surface_config);
1127 return;
1128 }
1129 wgpu::CurrentSurfaceTexture::Timeout | wgpu::CurrentSurfaceTexture::Occluded => {
1130 return;
1131 }
1132 wgpu::CurrentSurfaceTexture::Validation => {
1133 *self.last_error.lock().unwrap() =
1134 Some("Surface texture validation error".to_string());
1135 return;
1136 }
1137 };
1138
1139 // Now that we know the surface is healthy, ensure intermediate textures exist
1140 self.ensure_intermediate_textures();
1141
1142 let frame_view = frame
1143 .texture
1144 .create_view(&wgpu::TextureViewDescriptor::default());
1145
1146 let gamma_params = GammaParams {
1147 gamma_ratios: self.rendering_params.gamma_ratios,
1148 grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
1149 subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
1150 _pad: [0.0; 2],
1151 };
1152
1153 let globals = GlobalParams {
1154 viewport_size: [
1155 self.surface_config.width as f32,
1156 self.surface_config.height as f32,
1157 ],
1158 premultiplied_alpha: if self.surface_config.alpha_mode
1159 == wgpu::CompositeAlphaMode::PreMultiplied
1160 {
1161 1
1162 } else {
1163 0
1164 },
1165 pad: 0,
1166 };
1167
1168 let path_globals = GlobalParams {
1169 premultiplied_alpha: 0,
1170 ..globals
1171 };
1172
1173 {
1174 let resources = self.resources();
1175 resources.queue.write_buffer(
1176 &resources.globals_buffer,
1177 0,
1178 bytemuck::bytes_of(&globals),
1179 );
1180 resources.queue.write_buffer(
1181 &resources.globals_buffer,
1182 self.path_globals_offset,
1183 bytemuck::bytes_of(&path_globals),
1184 );
1185 resources.queue.write_buffer(
1186 &resources.globals_buffer,
1187 self.gamma_offset,
1188 bytemuck::bytes_of(&gamma_params),
1189 );
1190 }
1191
1192 loop {
1193 let mut instance_offset: u64 = 0;
1194 let mut overflow = false;
1195
1196 let mut encoder =
1197 self.resources()
1198 .device
1199 .create_command_encoder(&wgpu::CommandEncoderDescriptor {
1200 label: Some("main_encoder"),
1201 });
1202
1203 {
1204 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1205 label: Some("main_pass"),
1206 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1207 view: &frame_view,
1208 resolve_target: None,
1209 ops: wgpu::Operations {
1210 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1211 store: wgpu::StoreOp::Store,
1212 },
1213 depth_slice: None,
1214 })],
1215 depth_stencil_attachment: None,
1216 ..Default::default()
1217 });
1218
1219 for batch in scene.batches() {
1220 let ok = match batch {
1221 PrimitiveBatch::Quads(range) => {
1222 self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
1223 }
1224 PrimitiveBatch::Shadows(range) => self.draw_shadows(
1225 &scene.shadows[range],
1226 &mut instance_offset,
1227 &mut pass,
1228 ),
1229 PrimitiveBatch::Paths(range) => {
1230 let paths = &scene.paths[range];
1231 if paths.is_empty() {
1232 continue;
1233 }
1234
1235 drop(pass);
1236
1237 let did_draw = self.draw_paths_to_intermediate(
1238 &mut encoder,
1239 paths,
1240 &mut instance_offset,
1241 );
1242
1243 pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1244 label: Some("main_pass_continued"),
1245 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1246 view: &frame_view,
1247 resolve_target: None,
1248 ops: wgpu::Operations {
1249 load: wgpu::LoadOp::Load,
1250 store: wgpu::StoreOp::Store,
1251 },
1252 depth_slice: None,
1253 })],
1254 depth_stencil_attachment: None,
1255 ..Default::default()
1256 });
1257
1258 if did_draw {
1259 self.draw_paths_from_intermediate(
1260 paths,
1261 &mut instance_offset,
1262 &mut pass,
1263 )
1264 } else {
1265 false
1266 }
1267 }
1268 PrimitiveBatch::Underlines(range) => self.draw_underlines(
1269 &scene.underlines[range],
1270 &mut instance_offset,
1271 &mut pass,
1272 ),
1273 PrimitiveBatch::MonochromeSprites { texture_id, range } => self
1274 .draw_monochrome_sprites(
1275 &scene.monochrome_sprites[range],
1276 texture_id,
1277 &mut instance_offset,
1278 &mut pass,
1279 ),
1280 PrimitiveBatch::SubpixelSprites { texture_id, range } => self
1281 .draw_subpixel_sprites(
1282 &scene.subpixel_sprites[range],
1283 texture_id,
1284 &mut instance_offset,
1285 &mut pass,
1286 ),
1287 PrimitiveBatch::PolychromeSprites { texture_id, range } => self
1288 .draw_polychrome_sprites(
1289 &scene.polychrome_sprites[range],
1290 texture_id,
1291 &mut instance_offset,
1292 &mut pass,
1293 ),
1294 PrimitiveBatch::Surfaces(_surfaces) => {
1295 // Surfaces are macOS-only for video playback
1296 // Not implemented for Linux/wgpu
1297 true
1298 }
1299 };
1300 if !ok {
1301 overflow = true;
1302 break;
1303 }
1304 }
1305 }
1306
1307 if overflow {
1308 drop(encoder);
1309 if self.instance_buffer_capacity >= self.max_buffer_size {
1310 log::error!(
1311 "instance buffer size grew too large: {}",
1312 self.instance_buffer_capacity
1313 );
1314 frame.present();
1315 return;
1316 }
1317 self.grow_instance_buffer();
1318 continue;
1319 }
1320
1321 self.resources()
1322 .queue
1323 .submit(std::iter::once(encoder.finish()));
1324 frame.present();
1325 return;
1326 }
1327 }
1328
1329 fn draw_quads(
1330 &self,
1331 quads: &[Quad],
1332 instance_offset: &mut u64,
1333 pass: &mut wgpu::RenderPass<'_>,
1334 ) -> bool {
1335 let data = unsafe { Self::instance_bytes(quads) };
1336 self.draw_instances(
1337 data,
1338 quads.len() as u32,
1339 &self.resources().pipelines.quads,
1340 instance_offset,
1341 pass,
1342 )
1343 }
1344
1345 fn draw_shadows(
1346 &self,
1347 shadows: &[Shadow],
1348 instance_offset: &mut u64,
1349 pass: &mut wgpu::RenderPass<'_>,
1350 ) -> bool {
1351 let data = unsafe { Self::instance_bytes(shadows) };
1352 self.draw_instances(
1353 data,
1354 shadows.len() as u32,
1355 &self.resources().pipelines.shadows,
1356 instance_offset,
1357 pass,
1358 )
1359 }
1360
1361 fn draw_underlines(
1362 &self,
1363 underlines: &[Underline],
1364 instance_offset: &mut u64,
1365 pass: &mut wgpu::RenderPass<'_>,
1366 ) -> bool {
1367 let data = unsafe { Self::instance_bytes(underlines) };
1368 self.draw_instances(
1369 data,
1370 underlines.len() as u32,
1371 &self.resources().pipelines.underlines,
1372 instance_offset,
1373 pass,
1374 )
1375 }
1376
1377 fn draw_monochrome_sprites(
1378 &self,
1379 sprites: &[MonochromeSprite],
1380 texture_id: AtlasTextureId,
1381 instance_offset: &mut u64,
1382 pass: &mut wgpu::RenderPass<'_>,
1383 ) -> bool {
1384 let tex_info = self.atlas.get_texture_info(texture_id);
1385 let data = unsafe { Self::instance_bytes(sprites) };
1386 self.draw_instances_with_texture(
1387 data,
1388 sprites.len() as u32,
1389 &tex_info.view,
1390 &self.resources().pipelines.mono_sprites,
1391 instance_offset,
1392 pass,
1393 )
1394 }
1395
1396 fn draw_subpixel_sprites(
1397 &self,
1398 sprites: &[SubpixelSprite],
1399 texture_id: AtlasTextureId,
1400 instance_offset: &mut u64,
1401 pass: &mut wgpu::RenderPass<'_>,
1402 ) -> bool {
1403 let tex_info = self.atlas.get_texture_info(texture_id);
1404 let data = unsafe { Self::instance_bytes(sprites) };
1405 let resources = self.resources();
1406 let pipeline = resources
1407 .pipelines
1408 .subpixel_sprites
1409 .as_ref()
1410 .unwrap_or(&resources.pipelines.mono_sprites);
1411 self.draw_instances_with_texture(
1412 data,
1413 sprites.len() as u32,
1414 &tex_info.view,
1415 pipeline,
1416 instance_offset,
1417 pass,
1418 )
1419 }
1420
1421 fn draw_polychrome_sprites(
1422 &self,
1423 sprites: &[PolychromeSprite],
1424 texture_id: AtlasTextureId,
1425 instance_offset: &mut u64,
1426 pass: &mut wgpu::RenderPass<'_>,
1427 ) -> bool {
1428 let tex_info = self.atlas.get_texture_info(texture_id);
1429 let data = unsafe { Self::instance_bytes(sprites) };
1430 self.draw_instances_with_texture(
1431 data,
1432 sprites.len() as u32,
1433 &tex_info.view,
1434 &self.resources().pipelines.poly_sprites,
1435 instance_offset,
1436 pass,
1437 )
1438 }
1439
1440 fn draw_instances(
1441 &self,
1442 data: &[u8],
1443 instance_count: u32,
1444 pipeline: &wgpu::RenderPipeline,
1445 instance_offset: &mut u64,
1446 pass: &mut wgpu::RenderPass<'_>,
1447 ) -> bool {
1448 if instance_count == 0 {
1449 return true;
1450 }
1451 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1452 return false;
1453 };
1454 let resources = self.resources();
1455 let bind_group = resources
1456 .device
1457 .create_bind_group(&wgpu::BindGroupDescriptor {
1458 label: None,
1459 layout: &resources.bind_group_layouts.instances,
1460 entries: &[wgpu::BindGroupEntry {
1461 binding: 0,
1462 resource: self.instance_binding(offset, size),
1463 }],
1464 });
1465 pass.set_pipeline(pipeline);
1466 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1467 pass.set_bind_group(1, &bind_group, &[]);
1468 pass.draw(0..4, 0..instance_count);
1469 true
1470 }
1471
1472 fn draw_instances_with_texture(
1473 &self,
1474 data: &[u8],
1475 instance_count: u32,
1476 texture_view: &wgpu::TextureView,
1477 pipeline: &wgpu::RenderPipeline,
1478 instance_offset: &mut u64,
1479 pass: &mut wgpu::RenderPass<'_>,
1480 ) -> bool {
1481 if instance_count == 0 {
1482 return true;
1483 }
1484 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1485 return false;
1486 };
1487 let resources = self.resources();
1488 let bind_group = resources
1489 .device
1490 .create_bind_group(&wgpu::BindGroupDescriptor {
1491 label: None,
1492 layout: &resources.bind_group_layouts.instances_with_texture,
1493 entries: &[
1494 wgpu::BindGroupEntry {
1495 binding: 0,
1496 resource: self.instance_binding(offset, size),
1497 },
1498 wgpu::BindGroupEntry {
1499 binding: 1,
1500 resource: wgpu::BindingResource::TextureView(texture_view),
1501 },
1502 wgpu::BindGroupEntry {
1503 binding: 2,
1504 resource: wgpu::BindingResource::Sampler(&resources.atlas_sampler),
1505 },
1506 ],
1507 });
1508 pass.set_pipeline(pipeline);
1509 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1510 pass.set_bind_group(1, &bind_group, &[]);
1511 pass.draw(0..4, 0..instance_count);
1512 true
1513 }
1514
    /// Reinterprets a slice of instance structs as raw bytes for upload to
    /// the GPU instance buffer. The returned slice borrows `instances`.
    ///
    /// # Safety
    ///
    /// Callers must only pass types whose in-memory representation is exactly
    /// what the shaders expect to read.
    /// NOTE(review): exposing padding bytes through `&[u8]` is UB if the
    /// instance types contain padding — confirm all uploaded types are
    /// padding-free (or use `bytemuck::Pod` to enforce this).
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        unsafe {
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1523
    /// Composites the already-rasterized offscreen path texture into the main
    /// render pass.
    ///
    /// Returns `false` only when the sprite instances do not fit in the
    /// instance buffer (the frame is then retried with a larger buffer).
    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
        pass: &mut wgpu::RenderPass<'_>,
    ) -> bool {
        // Caller guarantees `paths` is non-empty (checked in draw()).
        let first_path = &paths[0];
        // If the first and last paths share the same draw order, emit one
        // sprite per path; otherwise emit a single sprite covering the union
        // of every path's clipped bounds.
        // NOTE(review): only first/last orders are compared — this assumes
        // the batch is grouped by order; confirm against Scene batching.
        let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
        {
            paths
                .iter()
                .map(|p| PathSprite {
                    bounds: p.clipped_bounds(),
                })
                .collect()
        } else {
            // Accumulate the union of all clipped bounds.
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            vec![PathSprite { bounds }]
        };

        let resources = self.resources();
        // Without an intermediate texture there is nothing to composite;
        // treat as success rather than triggering the buffer-grow retry.
        let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
            return true;
        };

        // SAFETY: `PathSprite` instances are uploaded verbatim as raw bytes
        // (see `instance_bytes`).
        let sprite_data = unsafe { Self::instance_bytes(&sprites) };
        self.draw_instances_with_texture(
            sprite_data,
            sprites.len() as u32,
            path_intermediate_view,
            &resources.pipelines.paths,
            instance_offset,
            pass,
        )
    }
1562
    /// Rasterizes `paths` into the offscreen intermediate texture, drawing
    /// through the MSAA target (resolved into the intermediate view) when one
    /// exists.
    ///
    /// Returns `false` only when the vertex data does not fit in the instance
    /// buffer, signalling the caller to grow the buffer and retry the frame.
    fn draw_paths_to_intermediate(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
    ) -> bool {
        // Flatten all paths into one vertex list; each vertex carries its
        // path's color and clipped bounds so one draw call covers the batch.
        let mut vertices = Vec::new();
        for path in paths {
            let bounds = path.clipped_bounds();
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds,
            }));
        }

        // Nothing to rasterize — succeed without touching the encoder.
        if vertices.is_empty() {
            return true;
        }

        // SAFETY: vertices are uploaded verbatim as raw bytes (see
        // `instance_bytes`).
        let vertex_data = unsafe { Self::instance_bytes(&vertices) };
        let Some((vertex_offset, vertex_size)) =
            self.write_to_instance_buffer(instance_offset, vertex_data)
        else {
            // Instance buffer full: caller grows the buffer and retries.
            return false;
        };

        let resources = self.resources();
        let data_bind_group = resources
            .device
            .create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("path_rasterization_bind_group"),
                layout: &resources.bind_group_layouts.instances,
                entries: &[wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.instance_binding(vertex_offset, vertex_size),
                }],
            });

        // No intermediate texture means nowhere to draw; treat as a no-op
        // success (the vertex data was still consumed above).
        let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
            return true;
        };

        // With MSAA: render into the multisampled target and resolve into the
        // intermediate texture. Without: render directly into it.
        let (target_view, resolve_target) = if let Some(ref msaa_view) = resources.path_msaa_view {
            (msaa_view, Some(path_intermediate_view))
        } else {
            (path_intermediate_view, None)
        };

        {
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("path_rasterization_pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target_view,
                    resolve_target,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                        store: wgpu::StoreOp::Store,
                    },
                    depth_slice: None,
                })],
                depth_stencil_attachment: None,
                ..Default::default()
            });

            pass.set_pipeline(&resources.pipelines.path_rasterization);
            pass.set_bind_group(0, &resources.path_globals_bind_group, &[]);
            pass.set_bind_group(1, &data_bind_group, &[]);
            // One vertex per list entry; no instancing here.
            pass.draw(0..vertices.len() as u32, 0..1);
        }

        true
    }
1637
1638 fn grow_instance_buffer(&mut self) {
1639 let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
1640 log::info!("increased instance buffer size to {}", new_capacity);
1641 let resources = self.resources_mut();
1642 resources.instance_buffer = resources.device.create_buffer(&wgpu::BufferDescriptor {
1643 label: Some("instance_buffer"),
1644 size: new_capacity,
1645 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1646 mapped_at_creation: false,
1647 });
1648 self.instance_buffer_capacity = new_capacity;
1649 }
1650
1651 fn write_to_instance_buffer(
1652 &self,
1653 instance_offset: &mut u64,
1654 data: &[u8],
1655 ) -> Option<(u64, NonZeroU64)> {
1656 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1657 let size = (data.len() as u64).max(16);
1658 if offset + size > self.instance_buffer_capacity {
1659 return None;
1660 }
1661 let resources = self.resources();
1662 resources
1663 .queue
1664 .write_buffer(&resources.instance_buffer, offset, data);
1665 *instance_offset = offset + size;
1666 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1667 }
1668
1669 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1670 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1671 buffer: &self.resources().instance_buffer,
1672 offset,
1673 size: Some(size),
1674 })
1675 }
1676
1677 /// Mark the surface as unconfigured so rendering is skipped until a new
1678 /// surface is provided via [`replace_surface`](Self::replace_surface).
1679 ///
1680 /// This does **not** drop the renderer — the device, queue, atlas, and
1681 /// pipelines stay alive. Use this when the native window is destroyed
1682 /// (e.g. Android `TerminateWindow`) but you intend to re-create the
1683 /// surface later without losing cached atlas textures.
1684 pub fn unconfigure_surface(&mut self) {
1685 self.surface_configured = false;
1686 // Drop intermediate textures since they reference the old surface size.
1687 if let Some(res) = self.resources.as_mut() {
1688 res.invalidate_intermediate_textures();
1689 }
1690 }
1691
1692 /// Replace the wgpu surface with a new one (e.g. after Android destroys
1693 /// and recreates the native window). Keeps the device, queue, atlas, and
1694 /// all pipelines intact so cached `AtlasTextureId`s remain valid.
1695 ///
1696 /// The `instance` **must** be the same [`wgpu::Instance`] that was used to
1697 /// create the adapter and device (i.e. from the [`WgpuContext`]). Using a
1698 /// different instance will cause a "Device does not exist" panic because
1699 /// the wgpu device is bound to its originating instance.
1700 #[cfg(not(target_family = "wasm"))]
1701 pub fn replace_surface<W: HasWindowHandle>(
1702 &mut self,
1703 window: &W,
1704 config: WgpuSurfaceConfig,
1705 instance: &wgpu::Instance,
1706 ) -> anyhow::Result<()> {
1707 let window_handle = window
1708 .window_handle()
1709 .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
1710
1711 let surface = create_surface(instance, window_handle.as_raw())?;
1712
1713 let width = (config.size.width.0 as u32).max(1);
1714 let height = (config.size.height.0 as u32).max(1);
1715
1716 let alpha_mode = if config.transparent {
1717 self.transparent_alpha_mode
1718 } else {
1719 self.opaque_alpha_mode
1720 };
1721
1722 self.surface_config.width = width;
1723 self.surface_config.height = height;
1724 self.surface_config.alpha_mode = alpha_mode;
1725 if let Some(mode) = config.preferred_present_mode {
1726 self.surface_config.present_mode = mode;
1727 }
1728
1729 {
1730 let res = self
1731 .resources
1732 .as_mut()
1733 .expect("GPU resources not available");
1734 surface.configure(&res.device, &self.surface_config);
1735 res.surface = surface;
1736
1737 // Invalidate intermediate textures — they'll be recreated lazily.
1738 res.invalidate_intermediate_textures();
1739 }
1740
1741 self.surface_configured = true;
1742
1743 Ok(())
1744 }
1745
1746 pub fn destroy(&mut self) {
1747 // Release surface-bound GPU resources eagerly so the underlying native
1748 // window can be destroyed before the renderer itself is dropped.
1749 self.resources.take();
1750 }
1751
    /// Returns true if the GPU device was lost and recovery is needed.
    ///
    /// The flag is shared across threads, hence the `SeqCst` atomic load.
    /// See [`recover`](Self::recover) for the recovery path.
    pub fn device_lost(&self) -> bool {
        self.device_lost.load(std::sync::atomic::Ordering::SeqCst)
    }
1756
1757 /// Returns true if a redraw is needed because GPU state was cleared.
1758 /// Calling this method clears the flag.
1759 pub fn needs_redraw(&mut self) -> bool {
1760 std::mem::take(&mut self.needs_redraw)
1761 }
1762
    /// Recovers from a lost GPU device by recreating the renderer with a new context.
    ///
    /// Call this after detecting `device_lost()` returns true.
    ///
    /// This method coordinates recovery across multiple windows:
    /// - The first window to call this will recreate the shared context
    /// - Subsequent windows will adopt the already-recovered context
    #[cfg(not(target_family = "wasm"))]
    pub fn recover<W>(&mut self, window: &W) -> anyhow::Result<()>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let gpu_context = self.context.as_ref().expect("recover requires gpu_context");

        // Check if another window already recovered the context
        let needs_new_context = gpu_context
            .borrow()
            .as_ref()
            .is_none_or(|ctx| ctx.device_lost());

        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let surface = if needs_new_context {
            log::warn!("GPU device lost, recreating context...");

            // Drop old resources to release Arc<Device>/Arc<Queue> and GPU resources
            self.resources = None;
            *gpu_context.borrow_mut() = None;

            // Wait for GPU driver to stabilize (350ms copied from windows :shrug:)
            std::thread::sleep(std::time::Duration::from_millis(350));

            let instance = WgpuContext::instance(Box::new(window.clone()));
            let surface = create_surface(&instance, window_handle.as_raw())?;
            let new_context = WgpuContext::new(instance, &surface, self.compositor_gpu)?;
            *gpu_context.borrow_mut() = Some(new_context);
            surface
        } else {
            // Another window already rebuilt the shared context; just create
            // a fresh surface for this window from the existing instance.
            let ctx_ref = gpu_context.borrow();
            let instance = &ctx_ref.as_ref().unwrap().instance;
            create_surface(instance, window_handle.as_raw())?
        };

        // Rebuild the surface configuration from the old one so size,
        // transparency, and present mode survive recovery.
        let config = WgpuSurfaceConfig {
            size: gpui::Size {
                width: gpui::DevicePixels(self.surface_config.width as i32),
                height: gpui::DevicePixels(self.surface_config.height as i32),
            },
            transparent: self.surface_config.alpha_mode != wgpu::CompositeAlphaMode::Opaque,
            preferred_present_mode: Some(self.surface_config.present_mode),
        };
        let gpu_context = Rc::clone(gpu_context);
        // NOTE(review): `ctx_ref` keeps the RefCell borrowed while
        // `new_internal` runs below — confirm `new_internal` never borrows
        // the shared context mutably, or this panics at runtime.
        let ctx_ref = gpu_context.borrow();
        let context = ctx_ref.as_ref().expect("context should exist");

        self.resources = None;
        // NOTE(review): presumably resets the atlas's GPU-side state for the
        // new device so cached texture ids stay valid — confirm.
        self.atlas.handle_device_lost(context);

        *self = Self::new_internal(
            Some(gpu_context.clone()),
            context,
            surface,
            config,
            self.compositor_gpu,
            self.atlas.clone(),
        )?;

        log::info!("GPU recovery complete");
        Ok(())
    }
1835}
1836
#[cfg(not(target_family = "wasm"))]
/// Creates a `wgpu::Surface` directly from a raw window handle.
///
/// The display handle is deliberately omitted; wgpu falls back to the one
/// already supplied via `InstanceDescriptor::display` (see inline comment).
fn create_surface(
    instance: &wgpu::Instance,
    raw_window_handle: raw_window_handle::RawWindowHandle,
) -> anyhow::Result<wgpu::Surface<'static>> {
    // SAFETY: `create_surface_unsafe` requires the window behind
    // `raw_window_handle` to remain valid for the surface's lifetime.
    // NOTE(review): the returned surface is 'static — callers must drop it
    // before the native window is destroyed; confirm call sites uphold this.
    unsafe {
        instance
            .create_surface_unsafe(wgpu::SurfaceTargetUnsafe::RawHandle {
                // Fall back to the display handle already provided via InstanceDescriptor::display.
                raw_display_handle: None,
                raw_window_handle,
            })
            .map_err(|e| anyhow::anyhow!("{e}"))
    }
}
1852
/// Rendering tunables resolved once from adapter capabilities and `ZED_FONTS_*`
/// environment variables (see `RenderingParameters::new`).
struct RenderingParameters {
    // MSAA sample count for path rasterization: 4, 2, or 1 (no MSAA).
    path_sample_count: u32,
    // Gamma-correction ratios derived from the configured gamma value.
    gamma_ratios: [f32; 4],
    // Contrast boost for grayscale-antialiased text (clamped to >= 0).
    grayscale_enhanced_contrast: f32,
    // Contrast boost for subpixel-antialiased text (clamped to >= 0).
    subpixel_enhanced_contrast: f32,
}
1859
1860impl RenderingParameters {
1861 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1862 use std::env;
1863
1864 let format_features = adapter.get_texture_format_features(surface_format);
1865 let path_sample_count = [4, 2, 1]
1866 .into_iter()
1867 .find(|&n| format_features.flags.sample_count_supported(n))
1868 .unwrap_or(1);
1869
1870 let gamma = env::var("ZED_FONTS_GAMMA")
1871 .ok()
1872 .and_then(|v| v.parse().ok())
1873 .unwrap_or(1.8_f32)
1874 .clamp(1.0, 2.2);
1875 let gamma_ratios = get_gamma_correction_ratios(gamma);
1876
1877 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1878 .ok()
1879 .and_then(|v| v.parse().ok())
1880 .unwrap_or(1.0_f32)
1881 .max(0.0);
1882
1883 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1884 .ok()
1885 .and_then(|v| v.parse().ok())
1886 .unwrap_or(0.5_f32)
1887 .max(0.0);
1888
1889 Self {
1890 path_sample_count,
1891 gamma_ratios,
1892 grayscale_enhanced_contrast,
1893 subpixel_enhanced_contrast,
1894 }
1895 }
1896}