1use crate::{CompositorGpuHint, WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use log::warn;
9#[cfg(not(target_family = "wasm"))]
10use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
11use std::cell::RefCell;
12use std::num::NonZeroU64;
13use std::rc::Rc;
14use std::sync::{Arc, Mutex};
15
/// Per-frame uniform data bound at group(0) binding(0) (see `globals_bind_group`).
/// `#[repr(C)]` + `Pod` so it can be written into the globals buffer with bytemuck.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    // Viewport size in scaled pixels; used by shaders to map to clip space.
    viewport_size: [f32; 2],
    // Non-zero when the surface uses premultiplied alpha — TODO confirm against shaders.wgsl.
    premultiplied_alpha: u32,
    // Explicit padding to keep the struct's size a multiple of 16 bytes for uniform layout.
    pad: u32,
}
23
/// GPU-friendly rectangle: origin and size flattened to `[f32; 2]` pairs so the
/// struct is `Pod` and can be copied into GPU buffers directly.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    origin: [f32; 2],
    size: [f32; 2],
}
30
31impl From<Bounds<ScaledPixels>> for PodBounds {
32 fn from(bounds: Bounds<ScaledPixels>) -> Self {
33 Self {
34 origin: [bounds.origin.x.0, bounds.origin.y.0],
35 size: [bounds.size.width.0, bounds.size.height.0],
36 }
37 }
38}
39
/// Uniform data for the (currently dead-code) surface pipeline: the quad to draw
/// and the clip rectangle. Layout matches the `surfaces` bind group layout.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    bounds: PodBounds,
    content_mask: PodBounds,
}
46
/// Text gamma-correction uniforms bound at group(0) binding(1).
/// Values presumably come from `get_gamma_correction_ratios` — confirm at the call site
/// that fills the buffer (not visible in this chunk).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    gamma_ratios: [f32; 4],
    grayscale_enhanced_contrast: f32,
    subpixel_enhanced_contrast: f32,
    // Padding so the struct is 16-byte aligned for uniform buffer rules.
    _pad: [f32; 2],
}
55
/// Instance data for compositing one rasterized path from the intermediate
/// texture onto the surface.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
61
/// Vertex fed to the `path_rasterization` pipeline.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    // Vertex position in scaled pixels.
    xy_position: Point<ScaledPixels>,
    // Curve-space coordinate — presumably the quadratic Bézier (s, t) used by the
    // fragment shader's coverage test; confirm against shaders.wgsl.
    st_position: Point<f32>,
    color: Background,
    // Bounds of the whole path, used for clipping/compositing.
    bounds: Bounds<ScaledPixels>,
}
70
/// Caller-supplied configuration for creating a renderer's swapchain surface.
pub struct WgpuSurfaceConfig {
    /// Initial drawable size in device pixels.
    pub size: Size<DevicePixels>,
    /// Whether the window wants a transparent (premultiplied-alpha) surface.
    pub transparent: bool,
    /// Preferred presentation mode. When `Some`, the renderer will use this
    /// mode if supported by the surface, falling back to `Fifo`.
    /// When `None`, defaults to `Fifo` (VSync).
    ///
    /// Mobile platforms may prefer `Mailbox` (triple-buffering) to avoid
    /// blocking in `get_current_texture()` during lifecycle transitions.
    pub preferred_present_mode: Option<wgpu::PresentMode>,
}
82
/// One render pipeline per primitive batch kind (see `create_pipelines`).
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    /// Renders path geometry into the intermediate (possibly MSAA) texture.
    path_rasterization: wgpu::RenderPipeline,
    /// Composites rasterized paths from the intermediate texture onto the surface.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    /// Only present when the device supports dual-source blending (subpixel text).
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
95
/// Shared bind group layouts; created once and reused when (re)building pipelines.
struct WgpuBindGroupLayouts {
    /// group(0): global + gamma uniform buffers.
    globals: wgpu::BindGroupLayout,
    /// group(1): read-only instance storage buffer.
    instances: wgpu::BindGroupLayout,
    /// group(1): instance storage buffer + texture + sampler (sprites, paths).
    instances_with_texture: wgpu::BindGroupLayout,
    /// group(1) for the surface pipeline: uniforms + two textures + sampler.
    surfaces: wgpu::BindGroupLayout,
}
102
/// Shared GPU context reference, used to coordinate device recovery across multiple windows.
///
/// Holds `None` until the first window initializes the context; later windows reuse the
/// same instance/adapter/device (see `WgpuRenderer::new`).
pub type GpuContext = Rc<RefCell<Option<WgpuContext>>>;
105
/// GPU resources that must be dropped together during device recovery.
struct WgpuResources {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    atlas_sampler: wgpu::Sampler,
    /// Single uniform buffer holding GlobalParams (twice, at offsets 0 and
    /// `path_globals_offset`) followed by GammaParams at `gamma_offset`.
    globals_buffer: wgpu::Buffer,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    /// Growable storage buffer for per-primitive instance data.
    instance_buffer: wgpu::Buffer,
    // Intermediate path-rendering targets. Created lazily by
    // `ensure_intermediate_textures` and invalidated on resize; `None` until then.
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    // MSAA color target; only present when path_sample_count > 1.
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
}
123
/// A GPUI renderer backed by wgpu: owns the window surface, pipelines, and
/// buffers, and coordinates with a shared `GpuContext` for device recovery.
pub struct WgpuRenderer {
    /// Shared GPU context for device recovery coordination (unused on WASM).
    #[allow(dead_code)]
    context: Option<GpuContext>,
    /// Compositor GPU hint for adapter selection (unused on WASM).
    #[allow(dead_code)]
    compositor_gpu: Option<CompositorGpuHint>,
    /// `Some` while the GPU is usable; dropped as a unit during recovery.
    resources: Option<WgpuResources>,
    surface_config: wgpu::SurfaceConfiguration,
    atlas: Arc<WgpuAtlas>,
    /// Byte offset of the second GlobalParams copy inside `globals_buffer`.
    path_globals_offset: u64,
    /// Byte offset of GammaParams inside `globals_buffer`.
    gamma_offset: u64,
    instance_buffer_capacity: u64,
    max_buffer_size: u64,
    storage_buffer_alignment: u64,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    /// Alpha mode to use when the window is transparent / opaque, respectively,
    /// chosen once from surface capabilities in `new_internal`.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    /// Most recent uncaptured device error, captured via `on_uncaptured_error`.
    last_error: Arc<Mutex<Option<String>>>,
    failed_frame_count: u32,
    device_lost: std::sync::Arc<std::sync::atomic::AtomicBool>,
    surface_configured: bool,
}
150
151impl WgpuRenderer {
152 fn resources(&self) -> &WgpuResources {
153 self.resources
154 .as_ref()
155 .expect("GPU resources not available")
156 }
157
158 fn resources_mut(&mut self) -> &mut WgpuResources {
159 self.resources
160 .as_mut()
161 .expect("GPU resources not available")
162 }
163
    /// Picks a color format for the sprite atlas that the adapter can both
    /// sample from and copy into: Bgra8Unorm preferred, Rgba8Unorm as a
    /// logged fallback, otherwise an error describing both formats' usages.
    fn select_color_atlas_texture_format(
        adapter: &wgpu::Adapter,
    ) -> anyhow::Result<wgpu::TextureFormat> {
        let required_usages = wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST;
        let bgra_features = adapter.get_texture_format_features(wgpu::TextureFormat::Bgra8Unorm);
        if bgra_features.allowed_usages.contains(required_usages) {
            return Ok(wgpu::TextureFormat::Bgra8Unorm);
        }

        // BGRA unusable: try RGBA, warning so fallback configurations are visible in logs.
        let rgba_features = adapter.get_texture_format_features(wgpu::TextureFormat::Rgba8Unorm);
        if rgba_features.allowed_usages.contains(required_usages) {
            let info = adapter.get_info();
            warn!(
                "Adapter {} ({:?}) does not support Bgra8Unorm atlas textures with usages {:?}; \
                falling back to Rgba8Unorm atlas textures.",
                info.name, info.backend, required_usages,
            );
            return Ok(wgpu::TextureFormat::Rgba8Unorm);
        }

        // Neither format works — report both feature sets to aid bug triage.
        let info = adapter.get_info();
        Err(anyhow::anyhow!(
            "Adapter {} ({:?}, device={:#06x}) does not support a usable color atlas texture \
            format with usages {:?}. Bgra8Unorm allowed usages: {:?}; \
            Rgba8Unorm allowed usages: {:?}.",
            info.name,
            info.backend,
            info.device,
            required_usages,
            bgra_features.allowed_usages,
            rgba_features.allowed_usages,
        ))
    }
197
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// The `gpu_context` is a shared reference that coordinates GPU context across
    /// multiple windows. The first window to create a renderer will initialize the
    /// context; subsequent windows will share it.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    #[cfg(not(target_family = "wasm"))]
    pub fn new<W>(
        gpu_context: GpuContext,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            // Fall back to the display handle already provided via InstanceDescriptor::display.
            raw_display_handle: None,
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        // NOTE: the borrow is dropped at the end of this statement, before the
        // `borrow_mut` below — keep it that way to avoid a RefCell panic.
        let instance = gpu_context
            .borrow()
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(|| WgpuContext::instance(Box::new(window.clone())));

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        // Reuse the shared context if one exists (verifying it can drive this
        // surface), otherwise initialize it for all future windows.
        let mut ctx_ref = gpu_context.borrow_mut();
        let context = match ctx_ref.as_mut() {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => ctx_ref.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        let color_atlas_texture_format = Self::select_color_atlas_texture_format(&context.adapter)?;
        let atlas = Arc::new(WgpuAtlas::new(
            Arc::clone(&context.device),
            Arc::clone(&context.queue),
            color_atlas_texture_format,
        ));

        Self::new_internal(
            Some(Rc::clone(&gpu_context)),
            context,
            surface,
            config,
            compositor_gpu,
            atlas,
        )
    }
270
    /// Creates a renderer targeting an HTML canvas (WASM builds). No shared
    /// GPU context or compositor hint is used on this path.
    #[cfg(target_family = "wasm")]
    pub fn new_from_canvas(
        context: &WgpuContext,
        canvas: &web_sys::HtmlCanvasElement,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface = context
            .instance
            .create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))
            .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?;

        let color_atlas_texture_format = Self::select_color_atlas_texture_format(&context.adapter)?;
        let atlas = Arc::new(WgpuAtlas::new(
            Arc::clone(&context.device),
            Arc::clone(&context.queue),
            color_atlas_texture_format,
        ));

        Self::new_internal(None, context, surface, config, None, atlas)
    }
291
    /// Shared constructor tail: negotiates the surface format/alpha mode,
    /// configures the surface, builds pipelines, allocates the uniform and
    /// instance buffers, and assembles the renderer.
    fn new_internal(
        gpu_context: Option<GpuContext>,
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
        atlas: Arc<WgpuAtlas>,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer non-sRGB 8-bit formats; shaders presumably handle gamma themselves
        // (see the gamma uniforms). Fall back to any non-sRGB format, then to anything.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Choose the first supported alpha mode from an ordered preference list,
        // falling back to whatever the surface offers first.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Resolve both up front so update_transparency() can switch without re-querying.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to the device limit; a too-large surface
        // would fail to configure.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            // max(1): zero-sized surfaces are invalid (e.g. minimized windows).
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: config
                .preferred_present_mode
                .filter(|mode| surface_caps.present_modes.contains(mode))
                .unwrap_or(wgpu::PresentMode::Fifo),
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Pack three uniform blocks into one buffer, each aligned to the
        // device's minimum uniform-buffer offset alignment:
        //   [GlobalParams @ 0][GlobalParams @ path_globals_offset][GammaParams @ gamma_offset]
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let max_buffer_size = device.limits().max_buffer_size;
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        // 2 MiB initial capacity; grown on demand up to max_buffer_size.
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Main bind group: GlobalParams at offset 0 + GammaParams.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        // Path bind group: identical layout, but GlobalParams at path_globals_offset
        // so path rasterization can use different globals in the same frame.
        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured device errors so frame code can surface/diagnose them later.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        let resources = WgpuResources {
            device,
            queue,
            surface,
            pipelines,
            bind_group_layouts,
            atlas_sampler,
            globals_buffer,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
        };

        Ok(Self {
            context: gpu_context,
            compositor_gpu,
            resources: Some(resources),
            surface_config,
            atlas,
            path_globals_offset,
            gamma_offset,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            max_buffer_size,
            storage_buffer_alignment,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
            device_lost: context.device_lost_flag(),
            surface_configured: true,
        })
    }
523
    /// Creates the four bind group layouts shared by every pipeline. These are
    /// built once; pipelines are rebuilt against them on alpha-mode changes.
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        // group(0): GlobalParams (vertex+fragment) + GammaParams (fragment only).
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only instance storage buffer entry, shared by the layouts below.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        // group(1) for quads/shadows/underlines/path rasterization: instances only.
        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // group(1) for sprites and path compositing: instances + sampled texture + sampler.
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // group(1) for the surface pipeline: SurfaceParams uniform + two planes
        // of texture data + sampler. Two textures presumably correspond to a
        // planar video format — confirm against the surface shader.
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
647
    /// Builds every render pipeline. Called at startup and again whenever the
    /// surface alpha mode changes (the blend state depends on it).
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        // Diagnostic guard: verify the device actually has
        // DUAL_SOURCE_BLENDING. We have a crash report (ZED-5G1) where a
        // feature mismatch caused a wgpu-hal abort, but we haven't
        // identified the code path that produces the mismatch. This
        // guard prevents the crash and logs more evidence.
        // Remove this check once:
        // a) We find and fix the root cause, or
        // b) There are no reports of this warning appearing for some time.
        let device_has_feature = device
            .features()
            .contains(wgpu::Features::DUAL_SOURCE_BLENDING);
        if dual_source_blending && !device_has_feature {
            log::error!(
                "BUG: dual_source_blending flag is true but device does not \
                have DUAL_SOURCE_BLENDING enabled (device features: {:?}). \
                Falling back to mono text rendering. Please report this at \
                https://github.com/zed-industries/zed/issues",
                device.features(),
            );
        }
        // Only trust the flag when the device agrees; otherwise skip the subpixel pipeline.
        let dual_source_blending = dual_source_blending && device_has_feature;

        let base_shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(base_shader_source)),
        });

        // The subpixel shader needs the dual_source_blending WGSL extension, so it is
        // compiled as a separate module (base + subpixel sources concatenated) and
        // only when the feature is available.
        let subpixel_shader_source = include_str!("shaders_subpixel.wgsl");
        let subpixel_shader_module = if dual_source_blending {
            let combined = format!(
                "enable dual_source_blending;\n{base_shader_source}\n{subpixel_shader_source}"
            );
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("gpui_subpixel_shaders"),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Owned(combined)),
            }))
        } else {
            None
        };

        // Default blend state tracks the surface's alpha mode.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Helper closure: all pipelines share the same shape (no vertex buffers —
        // instance data comes from the storage buffer; no depth/stencil).
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32,
                               module: &wgpu::ShaderModule| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[Some(globals_layout), Some(data_layout)],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Renders into the intermediate texture (always premultiplied), possibly
        // multisampled — the only pipeline using path_sample_count.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
            &shader_module,
        );

        // Compositing blend for paths: source color is already premultiplied by
        // coverage; alpha accumulates additively.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
            &shader_module,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Subpixel text pipeline, only when dual-source blending is available:
        // per-channel coverage comes from the second shader output (Src1), and
        // the color write mask excludes alpha.
        let subpixel_sprites = if let Some(subpixel_module) = &subpixel_shader_module {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
                subpixel_module,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
            &shader_module,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
922
923 fn create_path_intermediate(
924 device: &wgpu::Device,
925 format: wgpu::TextureFormat,
926 width: u32,
927 height: u32,
928 ) -> (wgpu::Texture, wgpu::TextureView) {
929 let texture = device.create_texture(&wgpu::TextureDescriptor {
930 label: Some("path_intermediate"),
931 size: wgpu::Extent3d {
932 width: width.max(1),
933 height: height.max(1),
934 depth_or_array_layers: 1,
935 },
936 mip_level_count: 1,
937 sample_count: 1,
938 dimension: wgpu::TextureDimension::D2,
939 format,
940 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
941 view_formats: &[],
942 });
943 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
944 (texture, view)
945 }
946
947 fn create_msaa_if_needed(
948 device: &wgpu::Device,
949 format: wgpu::TextureFormat,
950 width: u32,
951 height: u32,
952 sample_count: u32,
953 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
954 if sample_count <= 1 {
955 return None;
956 }
957 let texture = device.create_texture(&wgpu::TextureDescriptor {
958 label: Some("path_msaa"),
959 size: wgpu::Extent3d {
960 width: width.max(1),
961 height: height.max(1),
962 depth_or_array_layers: 1,
963 },
964 mip_level_count: 1,
965 sample_count,
966 dimension: wgpu::TextureDimension::D2,
967 format,
968 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
969 view_formats: &[],
970 });
971 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
972 Some((texture, view))
973 }
974
    /// Reconfigures the surface for a new drawable size (no-op if unchanged).
    ///
    /// Order here is deliberate: wait for in-flight GPU work, destroy the old
    /// intermediate textures, reconfigure the surface, then leave the
    /// intermediates `None` so `ensure_intermediate_textures` recreates them
    /// lazily on the next draw.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            // Same clamping policy as new_internal(): never exceed the device limit.
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                    Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            // max(1): a zero-sized surface configuration is invalid.
            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            let surface_config = self.surface_config.clone();

            let resources = self.resources_mut();

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = resources.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = resources.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = resources.path_msaa_texture {
                texture.destroy();
            }

            resources
                .surface
                .configure(&resources.device, &surface_config);

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            resources.path_intermediate_texture = None;
            resources.path_intermediate_view = None;
            resources.path_msaa_texture = None;
            resources.path_msaa_view = None;
        }
    }
1026
1027 fn ensure_intermediate_textures(&mut self) {
1028 if self.resources().path_intermediate_texture.is_some() {
1029 return;
1030 }
1031
1032 let format = self.surface_config.format;
1033 let width = self.surface_config.width;
1034 let height = self.surface_config.height;
1035 let path_sample_count = self.rendering_params.path_sample_count;
1036 let resources = self.resources_mut();
1037
1038 let (t, v) = Self::create_path_intermediate(&resources.device, format, width, height);
1039 resources.path_intermediate_texture = Some(t);
1040 resources.path_intermediate_view = Some(v);
1041
1042 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
1043 &resources.device,
1044 format,
1045 width,
1046 height,
1047 path_sample_count,
1048 )
1049 .map(|(t, v)| (Some(t), Some(v)))
1050 .unwrap_or((None, None));
1051 resources.path_msaa_texture = path_msaa_texture;
1052 resources.path_msaa_view = path_msaa_view;
1053 }
1054
1055 pub fn update_transparency(&mut self, transparent: bool) {
1056 let new_alpha_mode = if transparent {
1057 self.transparent_alpha_mode
1058 } else {
1059 self.opaque_alpha_mode
1060 };
1061
1062 if new_alpha_mode != self.surface_config.alpha_mode {
1063 self.surface_config.alpha_mode = new_alpha_mode;
1064 let surface_config = self.surface_config.clone();
1065 let path_sample_count = self.rendering_params.path_sample_count;
1066 let dual_source_blending = self.dual_source_blending;
1067 let resources = self.resources_mut();
1068 resources
1069 .surface
1070 .configure(&resources.device, &surface_config);
1071 resources.pipelines = Self::create_pipelines(
1072 &resources.device,
1073 &resources.bind_group_layouts,
1074 surface_config.format,
1075 surface_config.alpha_mode,
1076 path_sample_count,
1077 dual_source_blending,
1078 );
1079 }
1080 }
1081
    /// Current drawable size of the configured surface, in device pixels.
    #[allow(dead_code)]
    pub fn viewport_size(&self) -> Size<DevicePixels> {
        Size {
            width: DevicePixels(self.surface_config.width as i32),
            height: DevicePixels(self.surface_config.height as i32),
        }
    }
1089
    /// Shared sprite atlas this renderer samples textures from.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
1093
    /// Whether the pipelines were created with dual-source blending support
    /// (see the subpixel-sprite pipeline fallback in `draw_subpixel_sprites`).
    pub fn supports_dual_source_blending(&self) -> bool {
        self.dual_source_blending
    }
1097
    /// Reports basic information about the adapter backing this renderer.
    pub fn gpu_specs(&self) -> GpuSpecs {
        GpuSpecs {
            // A `Cpu` device type indicates a software rasterizer.
            is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
            device_name: self.adapter_info.name.clone(),
            driver_name: self.adapter_info.driver.clone(),
            driver_info: self.adapter_info.driver_info.clone(),
        }
    }
1106
    /// Maximum texture dimension supported by the device, in pixels.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
1110
    /// Renders one frame of `scene` to the surface.
    ///
    /// Acquires a swapchain texture, uploads the per-frame global uniforms,
    /// records one render pass per primitive batch, and presents. If the
    /// shared instance buffer overflows mid-frame, the encoder is discarded,
    /// the buffer is grown, and the entire frame is re-recorded (the `loop`
    /// below).
    pub fn draw(&mut self, scene: &Scene) {
        // Bail out early if the surface has been unconfigured (e.g. during
        // Android background/rotation transitions). Attempting to acquire
        // a texture from an unconfigured surface can block indefinitely on
        // some drivers (Adreno).
        if !self.surface_configured {
            return;
        }

        // Drain any asynchronously reported GPU error. NOTE(review): the
        // message says "of 20" but the panic fires on the 21st consecutive
        // failure (`> 20`) — confirm which bound is intended.
        let last_error = self.last_error.lock().unwrap().take();
        if let Some(error) = last_error {
            self.failed_frame_count += 1;
            log::error!(
                "GPU error during frame (failure {} of 20): {error}",
                self.failed_frame_count
            );
            if self.failed_frame_count > 20 {
                panic!("Too many consecutive GPU errors. Last error: {error}");
            }
        } else {
            // Any successful frame resets the failure streak.
            self.failed_frame_count = 0;
        }

        self.atlas.before_frame();

        // Acquire the swapchain texture, handling every surface state.
        let frame = match self.resources().surface.get_current_texture() {
            wgpu::CurrentSurfaceTexture::Success(frame) => frame,
            wgpu::CurrentSurfaceTexture::Suboptimal(frame) => {
                // Textures must be destroyed before the surface can be reconfigured.
                drop(frame);
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                return;
            }
            wgpu::CurrentSurfaceTexture::Lost | wgpu::CurrentSurfaceTexture::Outdated => {
                // Reconfigure and skip this frame; the next draw retries.
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                return;
            }
            wgpu::CurrentSurfaceTexture::Timeout | wgpu::CurrentSurfaceTexture::Occluded => {
                return;
            }
            wgpu::CurrentSurfaceTexture::Validation => {
                // Recorded so the failure counter above picks it up next frame.
                *self.last_error.lock().unwrap() =
                    Some("Surface texture validation error".to_string());
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // Path rasterization uses the same globals but with premultiplied
        // alpha forced off for the intermediate texture.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload the three uniform blocks into distinct sub-ranges of the
        // shared globals buffer.
        {
            let resources = self.resources();
            resources.queue.write_buffer(
                &resources.globals_buffer,
                0,
                bytemuck::bytes_of(&globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.path_globals_offset,
                bytemuck::bytes_of(&path_globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.gamma_offset,
                bytemuck::bytes_of(&gamma_params),
            );
        }

        // Record-and-retry loop: any batch that fails to fit in the instance
        // buffer sets `overflow`, which grows the buffer and restarts.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder =
                self.resources()
                    .device
                    .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                        label: Some("main_encoder"),
                    });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths rasterize into an offscreen texture, which
                            // needs its own pass: end the main pass, rasterize,
                            // then resume with LoadOp::Load to keep prior output.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // The partially recorded encoder is discarded, never submitted.
                drop(encoder);
                if self.instance_buffer_capacity >= self.max_buffer_size {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    // NOTE(review): presents without submitting any work —
                    // presumably to return the texture to the swapchain; confirm.
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.resources()
                .queue
                .submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1355
1356 fn draw_quads(
1357 &self,
1358 quads: &[Quad],
1359 instance_offset: &mut u64,
1360 pass: &mut wgpu::RenderPass<'_>,
1361 ) -> bool {
1362 let data = unsafe { Self::instance_bytes(quads) };
1363 self.draw_instances(
1364 data,
1365 quads.len() as u32,
1366 &self.resources().pipelines.quads,
1367 instance_offset,
1368 pass,
1369 )
1370 }
1371
1372 fn draw_shadows(
1373 &self,
1374 shadows: &[Shadow],
1375 instance_offset: &mut u64,
1376 pass: &mut wgpu::RenderPass<'_>,
1377 ) -> bool {
1378 let data = unsafe { Self::instance_bytes(shadows) };
1379 self.draw_instances(
1380 data,
1381 shadows.len() as u32,
1382 &self.resources().pipelines.shadows,
1383 instance_offset,
1384 pass,
1385 )
1386 }
1387
1388 fn draw_underlines(
1389 &self,
1390 underlines: &[Underline],
1391 instance_offset: &mut u64,
1392 pass: &mut wgpu::RenderPass<'_>,
1393 ) -> bool {
1394 let data = unsafe { Self::instance_bytes(underlines) };
1395 self.draw_instances(
1396 data,
1397 underlines.len() as u32,
1398 &self.resources().pipelines.underlines,
1399 instance_offset,
1400 pass,
1401 )
1402 }
1403
1404 fn draw_monochrome_sprites(
1405 &self,
1406 sprites: &[MonochromeSprite],
1407 texture_id: AtlasTextureId,
1408 instance_offset: &mut u64,
1409 pass: &mut wgpu::RenderPass<'_>,
1410 ) -> bool {
1411 let tex_info = self.atlas.get_texture_info(texture_id);
1412 let data = unsafe { Self::instance_bytes(sprites) };
1413 self.draw_instances_with_texture(
1414 data,
1415 sprites.len() as u32,
1416 &tex_info.view,
1417 &self.resources().pipelines.mono_sprites,
1418 instance_offset,
1419 pass,
1420 )
1421 }
1422
1423 fn draw_subpixel_sprites(
1424 &self,
1425 sprites: &[SubpixelSprite],
1426 texture_id: AtlasTextureId,
1427 instance_offset: &mut u64,
1428 pass: &mut wgpu::RenderPass<'_>,
1429 ) -> bool {
1430 let tex_info = self.atlas.get_texture_info(texture_id);
1431 let data = unsafe { Self::instance_bytes(sprites) };
1432 let resources = self.resources();
1433 let pipeline = resources
1434 .pipelines
1435 .subpixel_sprites
1436 .as_ref()
1437 .unwrap_or(&resources.pipelines.mono_sprites);
1438 self.draw_instances_with_texture(
1439 data,
1440 sprites.len() as u32,
1441 &tex_info.view,
1442 pipeline,
1443 instance_offset,
1444 pass,
1445 )
1446 }
1447
1448 fn draw_polychrome_sprites(
1449 &self,
1450 sprites: &[PolychromeSprite],
1451 texture_id: AtlasTextureId,
1452 instance_offset: &mut u64,
1453 pass: &mut wgpu::RenderPass<'_>,
1454 ) -> bool {
1455 let tex_info = self.atlas.get_texture_info(texture_id);
1456 let data = unsafe { Self::instance_bytes(sprites) };
1457 self.draw_instances_with_texture(
1458 data,
1459 sprites.len() as u32,
1460 &tex_info.view,
1461 &self.resources().pipelines.poly_sprites,
1462 instance_offset,
1463 pass,
1464 )
1465 }
1466
    /// Uploads `data` into the shared instance buffer and issues an instanced
    /// draw with the given pipeline.
    ///
    /// Returns `false` when the instance buffer is out of space; the caller
    /// is expected to grow the buffer and re-record the frame.
    fn draw_instances(
        &self,
        data: &[u8],
        instance_count: u32,
        pipeline: &wgpu::RenderPipeline,
        instance_offset: &mut u64,
        pass: &mut wgpu::RenderPass<'_>,
    ) -> bool {
        if instance_count == 0 {
            return true;
        }
        let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
            return false;
        };
        let resources = self.resources();
        // Bind group 1 exposes just this draw's slice of the instance buffer.
        let bind_group = resources
            .device
            .create_bind_group(&wgpu::BindGroupDescriptor {
                label: None,
                layout: &resources.bind_group_layouts.instances,
                entries: &[wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.instance_binding(offset, size),
                }],
            });
        pass.set_pipeline(pipeline);
        pass.set_bind_group(0, &resources.globals_bind_group, &[]);
        pass.set_bind_group(1, &bind_group, &[]);
        // Four vertices per instance; one instance per primitive.
        pass.draw(0..4, 0..instance_count);
        true
    }
1498
    /// Like [`Self::draw_instances`], but additionally binds `texture_view`
    /// and the atlas sampler for pipelines that sample a texture.
    ///
    /// Returns `false` when the instance buffer is out of space.
    fn draw_instances_with_texture(
        &self,
        data: &[u8],
        instance_count: u32,
        texture_view: &wgpu::TextureView,
        pipeline: &wgpu::RenderPipeline,
        instance_offset: &mut u64,
        pass: &mut wgpu::RenderPass<'_>,
    ) -> bool {
        if instance_count == 0 {
            return true;
        }
        let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
            return false;
        };
        let resources = self.resources();
        // Bind group 1: instance data (0), texture (1), sampler (2).
        let bind_group = resources
            .device
            .create_bind_group(&wgpu::BindGroupDescriptor {
                label: None,
                layout: &resources.bind_group_layouts.instances_with_texture,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: self.instance_binding(offset, size),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: wgpu::BindingResource::TextureView(texture_view),
                    },
                    wgpu::BindGroupEntry {
                        binding: 2,
                        resource: wgpu::BindingResource::Sampler(&resources.atlas_sampler),
                    },
                ],
            });
        pass.set_pipeline(pipeline);
        pass.set_bind_group(0, &resources.globals_bind_group, &[]);
        pass.set_bind_group(1, &bind_group, &[]);
        // Four vertices per instance; one instance per primitive.
        pass.draw(0..4, 0..instance_count);
        true
    }
1541
    /// Reinterprets a slice of instance structs as raw bytes for GPU upload.
    ///
    /// # Safety
    ///
    /// Callers must only pass slices of plain-data instance types intended
    /// for byte-wise upload. NOTE(review): if `T` contains padding bytes,
    /// this view reads uninitialized memory — confirm every instance type
    /// used here is padding-free (or derive `Pod` and use bytemuck instead).
    /// The returned slice borrows `instances` and must not outlive it.
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        unsafe {
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1550
1551 fn draw_paths_from_intermediate(
1552 &self,
1553 paths: &[Path<ScaledPixels>],
1554 instance_offset: &mut u64,
1555 pass: &mut wgpu::RenderPass<'_>,
1556 ) -> bool {
1557 let first_path = &paths[0];
1558 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1559 {
1560 paths
1561 .iter()
1562 .map(|p| PathSprite {
1563 bounds: p.clipped_bounds(),
1564 })
1565 .collect()
1566 } else {
1567 let mut bounds = first_path.clipped_bounds();
1568 for path in paths.iter().skip(1) {
1569 bounds = bounds.union(&path.clipped_bounds());
1570 }
1571 vec![PathSprite { bounds }]
1572 };
1573
1574 let resources = self.resources();
1575 let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
1576 return true;
1577 };
1578
1579 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1580 self.draw_instances_with_texture(
1581 sprite_data,
1582 sprites.len() as u32,
1583 path_intermediate_view,
1584 &resources.pipelines.paths,
1585 instance_offset,
1586 pass,
1587 )
1588 }
1589
    /// Rasterizes a batch of paths into the offscreen intermediate texture,
    /// optionally through an MSAA target that is resolved into it.
    ///
    /// Returns `false` when the instance buffer overflowed; `true` otherwise,
    /// including when there was nothing to draw.
    fn draw_paths_to_intermediate(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
    ) -> bool {
        // Flatten every path into one vertex list; each vertex carries its
        // path's color and clipped bounds.
        let mut vertices = Vec::new();
        for path in paths {
            let bounds = path.clipped_bounds();
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds,
            }));
        }

        if vertices.is_empty() {
            return true;
        }

        let vertex_data = unsafe { Self::instance_bytes(&vertices) };
        let Some((vertex_offset, vertex_size)) =
            self.write_to_instance_buffer(instance_offset, vertex_data)
        else {
            return false;
        };

        let resources = self.resources();
        let data_bind_group = resources
            .device
            .create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("path_rasterization_bind_group"),
                layout: &resources.bind_group_layouts.instances,
                entries: &[wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.instance_binding(vertex_offset, vertex_size),
                }],
            });

        // NOTE(review): vertex data and the bind group are created above even
        // when the intermediate view is missing and we bail out here — the
        // buffer write is wasted in that case; confirm this is acceptable.
        let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
            return true;
        };

        // With MSAA, render into the multisampled target and resolve into
        // the intermediate texture; otherwise draw into it directly.
        let (target_view, resolve_target) = if let Some(ref msaa_view) = resources.path_msaa_view {
            (msaa_view, Some(path_intermediate_view))
        } else {
            (path_intermediate_view, None)
        };

        {
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("path_rasterization_pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target_view,
                    resolve_target,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                        store: wgpu::StoreOp::Store,
                    },
                    depth_slice: None,
                })],
                depth_stencil_attachment: None,
                ..Default::default()
            });

            pass.set_pipeline(&resources.pipelines.path_rasterization);
            pass.set_bind_group(0, &resources.path_globals_bind_group, &[]);
            pass.set_bind_group(1, &data_bind_group, &[]);
            // Non-instanced: one draw over the whole vertex list.
            pass.draw(0..vertices.len() as u32, 0..1);
        }

        true
    }
1664
1665 fn grow_instance_buffer(&mut self) {
1666 let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
1667 log::info!("increased instance buffer size to {}", new_capacity);
1668 let resources = self.resources_mut();
1669 resources.instance_buffer = resources.device.create_buffer(&wgpu::BufferDescriptor {
1670 label: Some("instance_buffer"),
1671 size: new_capacity,
1672 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1673 mapped_at_creation: false,
1674 });
1675 self.instance_buffer_capacity = new_capacity;
1676 }
1677
    /// Appends `data` to the shared instance buffer at the next aligned
    /// offset and advances `instance_offset` past it.
    ///
    /// Returns the write offset and the binding size on success, or `None`
    /// when the buffer has no room left (the caller grows it and retries).
    fn write_to_instance_buffer(
        &self,
        instance_offset: &mut u64,
        data: &[u8],
    ) -> Option<(u64, NonZeroU64)> {
        // Align to the device's storage-buffer offset alignment.
        let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
        // Floor the binding size at 16 bytes — presumably the pipeline's
        // minimum binding size; confirm against the bind group layout.
        let size = (data.len() as u64).max(16);
        if offset + size > self.instance_buffer_capacity {
            return None;
        }
        let resources = self.resources();
        resources
            .queue
            .write_buffer(&resources.instance_buffer, offset, data);
        *instance_offset = offset + size;
        Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
    }
1695
    /// Builds a buffer binding for the given sub-range of the shared
    /// instance buffer.
    fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
        wgpu::BindingResource::Buffer(wgpu::BufferBinding {
            buffer: &self.resources().instance_buffer,
            offset,
            size: Some(size),
        })
    }
1703
1704 /// Mark the surface as unconfigured so rendering is skipped until a new
1705 /// surface is provided via [`replace_surface`](Self::replace_surface).
1706 ///
1707 /// This does **not** drop the renderer — the device, queue, atlas, and
1708 /// pipelines stay alive. Use this when the native window is destroyed
1709 /// (e.g. Android `TerminateWindow`) but you intend to re-create the
1710 /// surface later without losing cached atlas textures.
1711 pub fn unconfigure_surface(&mut self) {
1712 self.surface_configured = false;
1713 // Drop intermediate textures since they reference the old surface size.
1714 if let Some(res) = self.resources.as_mut() {
1715 res.path_intermediate_texture = None;
1716 res.path_intermediate_view = None;
1717 res.path_msaa_texture = None;
1718 res.path_msaa_view = None;
1719 }
1720 }
1721
    /// Replace the wgpu surface with a new one (e.g. after Android destroys
    /// and recreates the native window). Keeps the device, queue, atlas, and
    /// all pipelines intact so cached `AtlasTextureId`s remain valid.
    ///
    /// The `instance` **must** be the same [`wgpu::Instance`] that was used to
    /// create the adapter and device (i.e. from the [`WgpuContext`]). Using a
    /// different instance will cause a "Device does not exist" panic because
    /// the wgpu device is bound to its originating instance.
    #[cfg(not(target_family = "wasm"))]
    pub fn replace_surface<W: HasWindowHandle>(
        &mut self,
        window: &W,
        config: WgpuSurfaceConfig,
        instance: &wgpu::Instance,
    ) -> anyhow::Result<()> {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let surface = create_surface(instance, window_handle.as_raw())?;

        // Clamp to at least 1x1.
        let width = (config.size.width.0 as u32).max(1);
        let height = (config.size.height.0 as u32).max(1);

        let alpha_mode = if config.transparent {
            self.transparent_alpha_mode
        } else {
            self.opaque_alpha_mode
        };

        self.surface_config.width = width;
        self.surface_config.height = height;
        self.surface_config.alpha_mode = alpha_mode;
        // Only override the present mode when the caller requested one.
        if let Some(mode) = config.preferred_present_mode {
            self.surface_config.present_mode = mode;
        }

        {
            let res = self
                .resources
                .as_mut()
                .expect("GPU resources not available");
            surface.configure(&res.device, &self.surface_config);
            res.surface = surface;

            // Invalidate intermediate textures — they'll be recreated lazily.
            res.path_intermediate_texture = None;
            res.path_intermediate_view = None;
            res.path_msaa_texture = None;
            res.path_msaa_view = None;
        }

        // Re-enable drawing now that a valid surface is configured.
        self.surface_configured = true;

        Ok(())
    }
1778
    /// Eagerly drops all surface-bound GPU resources.
    pub fn destroy(&mut self) {
        // Release surface-bound GPU resources eagerly so the underlying native
        // window can be destroyed before the renderer itself is dropped.
        self.resources.take();
    }
1784
    /// Returns true if the GPU device was lost and recovery is needed.
    ///
    /// Reads an atomic flag with `SeqCst` ordering; the flag is presumably
    /// set from a device-lost callback registered elsewhere — confirm.
    pub fn device_lost(&self) -> bool {
        self.device_lost.load(std::sync::atomic::Ordering::SeqCst)
    }
1789
    /// Recovers from a lost GPU device by recreating the renderer with a new context.
    ///
    /// Call this after detecting `device_lost()` returns true.
    ///
    /// This method coordinates recovery across multiple windows:
    /// - The first window to call this will recreate the shared context
    /// - Subsequent windows will adopt the already-recovered context
    #[cfg(not(target_family = "wasm"))]
    pub fn recover<W>(&mut self, window: &W) -> anyhow::Result<()>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let gpu_context = self.context.as_ref().expect("recover requires gpu_context");

        // Check if another window already recovered the context
        let needs_new_context = gpu_context
            .borrow()
            .as_ref()
            .is_none_or(|ctx| ctx.device_lost());

        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let surface = if needs_new_context {
            log::warn!("GPU device lost, recreating context...");

            // Drop old resources to release Arc<Device>/Arc<Queue> and GPU resources
            self.resources = None;
            *gpu_context.borrow_mut() = None;

            // Wait for GPU driver to stabilize (350ms copied from windows :shrug:)
            std::thread::sleep(std::time::Duration::from_millis(350));

            let instance = WgpuContext::instance(Box::new(window.clone()));
            let surface = create_surface(&instance, window_handle.as_raw())?;
            let new_context = WgpuContext::new(instance, &surface, self.compositor_gpu)?;
            *gpu_context.borrow_mut() = Some(new_context);
            surface
        } else {
            // Adopt the context another window already rebuilt; only a new
            // surface for this window is required.
            let ctx_ref = gpu_context.borrow();
            let instance = &ctx_ref.as_ref().unwrap().instance;
            create_surface(instance, window_handle.as_raw())?
        };

        // Rebuild the surface config from the pre-loss settings.
        let config = WgpuSurfaceConfig {
            size: gpui::Size {
                width: gpui::DevicePixels(self.surface_config.width as i32),
                height: gpui::DevicePixels(self.surface_config.height as i32),
            },
            transparent: self.surface_config.alpha_mode != wgpu::CompositeAlphaMode::Opaque,
            preferred_present_mode: Some(self.surface_config.present_mode),
        };
        let gpu_context = Rc::clone(gpu_context);
        // NOTE(review): this shared borrow of the RefCell stays alive across
        // `new_internal` below — confirm `new_internal` never calls
        // `borrow_mut` on the same context or this will panic at runtime.
        let ctx_ref = gpu_context.borrow();
        let context = ctx_ref.as_ref().expect("context should exist");
        let color_atlas_texture_format = Self::select_color_atlas_texture_format(&context.adapter)?;

        // Point the (shared) atlas at the new device/queue before rebuilding.
        self.resources = None;
        self.atlas.handle_device_lost(
            Arc::clone(&context.device),
            Arc::clone(&context.queue),
            color_atlas_texture_format,
        );

        *self = Self::new_internal(
            Some(gpu_context.clone()),
            context,
            surface,
            config,
            self.compositor_gpu,
            self.atlas.clone(),
        )?;

        log::info!("GPU recovery complete");
        Ok(())
    }
1867}
1868
/// Creates a wgpu surface for `instance` from a raw window handle.
///
/// NOTE(review): `create_surface_unsafe` requires the raw handle to remain
/// valid for the surface's lifetime — callers here pass handles of live
/// windows; confirm the surface is dropped before the native window is.
#[cfg(not(target_family = "wasm"))]
fn create_surface(
    instance: &wgpu::Instance,
    raw_window_handle: raw_window_handle::RawWindowHandle,
) -> anyhow::Result<wgpu::Surface<'static>> {
    unsafe {
        instance
            .create_surface_unsafe(wgpu::SurfaceTargetUnsafe::RawHandle {
                // Fall back to the display handle already provided via InstanceDescriptor::display.
                raw_display_handle: None,
                raw_window_handle,
            })
            .map_err(|e| anyhow::anyhow!("{e}"))
    }
}
1884
/// Rendering tunables resolved once when the renderer is created.
struct RenderingParameters {
    // MSAA sample count used for path rasterization (4, 2, or 1).
    path_sample_count: u32,
    // Gamma-correction ratios derived from the configured gamma value.
    gamma_ratios: [f32; 4],
    // Extra contrast for grayscale-antialiased text (>= 0).
    grayscale_enhanced_contrast: f32,
    // Extra contrast for subpixel-antialiased text (>= 0).
    subpixel_enhanced_contrast: f32,
}
1891
1892impl RenderingParameters {
1893 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1894 use std::env;
1895
1896 let format_features = adapter.get_texture_format_features(surface_format);
1897 let path_sample_count = [4, 2, 1]
1898 .into_iter()
1899 .find(|&n| format_features.flags.sample_count_supported(n))
1900 .unwrap_or(1);
1901
1902 let gamma = env::var("ZED_FONTS_GAMMA")
1903 .ok()
1904 .and_then(|v| v.parse().ok())
1905 .unwrap_or(1.8_f32)
1906 .clamp(1.0, 2.2);
1907 let gamma_ratios = get_gamma_correction_ratios(gamma);
1908
1909 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1910 .ok()
1911 .and_then(|v| v.parse().ok())
1912 .unwrap_or(1.0_f32)
1913 .max(0.0);
1914
1915 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1916 .ok()
1917 .and_then(|v| v.parse().ok())
1918 .unwrap_or(0.5_f32)
1919 .max(0.0);
1920
1921 Self {
1922 path_sample_count,
1923 gamma_ratios,
1924 grayscale_enhanced_contrast,
1925 subpixel_enhanced_contrast,
1926 }
1927 }
1928}