1use crate::{CompositorGpuHint, WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use log::warn;
9#[cfg(not(target_family = "wasm"))]
10use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
11use std::cell::RefCell;
12use std::num::NonZeroU64;
13use std::rc::Rc;
14use std::sync::{Arc, Mutex};
15
/// Per-frame globals uploaded to the shaders via the globals uniform buffer.
///
/// `#[repr(C)]` + `Pod`/`Zeroable` allow the struct to be written into the
/// GPU buffer byte-for-byte; the field layout must match the corresponding
/// WGSL declaration in `shaders.wgsl`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    // Viewport dimensions, in pixels, used to map positions to clip space.
    viewport_size: [f32; 2],
    // Non-zero when the surface composites with premultiplied alpha.
    premultiplied_alpha: u32,
    // Explicit padding so the struct size is a multiple of 16 bytes (WGSL
    // uniform layout requirement).
    pad: u32,
}
23
/// Plain-old-data rectangle (origin + size) in the layout the shaders expect.
///
/// Mirrors `Bounds<ScaledPixels>` but with bare `f32` arrays so it can be
/// memcpy'd into GPU buffers.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    // Top-left corner as [x, y].
    origin: [f32; 2],
    // Extent as [width, height].
    size: [f32; 2],
}
30
31impl From<Bounds<ScaledPixels>> for PodBounds {
32 fn from(bounds: Bounds<ScaledPixels>) -> Self {
33 Self {
34 origin: [bounds.origin.x.0, bounds.origin.y.0],
35 size: [bounds.size.width.0, bounds.size.height.0],
36 }
37 }
38}
39
/// Uniform parameters for the `surfaces` pipeline: where to draw a surface
/// quad and the clip rectangle to apply.
///
/// `#[repr(C)]` so the layout matches the WGSL-side uniform declaration.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    // Destination rectangle of the surface quad.
    bounds: PodBounds,
    // Clipping rectangle; fragments outside it are masked.
    content_mask: PodBounds,
}
46
/// Text gamma-correction parameters, bound alongside the frame globals
/// (group 0, binding 1) and consumed by the fragment shaders.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    // Ratios produced by `get_gamma_correction_ratios` (see gpui).
    gamma_ratios: [f32; 4],
    // Contrast enhancement applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    // Contrast enhancement applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    // Padding to a 16-byte multiple for WGSL uniform layout.
    _pad: [f32; 2],
}
55
/// Per-instance data for drawing one rasterized path sprite: the screen
/// rectangle the path's intermediate texture region is composited into.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
61
/// Vertex format for the path-rasterization pass (uploaded as instance/storage
/// data; `#[repr(C)]` fixes the layout the shader reads).
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    // Vertex position in scaled pixels.
    xy_position: Point<ScaledPixels>,
    // Position in the path's parametric (s, t) space.
    st_position: Point<f32>,
    // Fill color/gradient for the path.
    color: Background,
    // Bounding box of the whole path this vertex belongs to.
    bounds: Bounds<ScaledPixels>,
}
70
/// Parameters used when creating a renderer's presentation surface.
pub struct WgpuSurfaceConfig {
    /// Initial drawable size in device pixels.
    pub size: Size<DevicePixels>,
    /// Whether the surface should support transparency (selects the
    /// premultiplied-alpha composite mode when available).
    pub transparent: bool,
    /// Preferred presentation mode. When `Some`, the renderer will use this
    /// mode if supported by the surface, falling back to `Fifo`.
    /// When `None`, defaults to `Fifo` (VSync).
    ///
    /// Mobile platforms may prefer `Mailbox` (triple-buffering) to avoid
    /// blocking in `get_current_texture()` during lifecycle transitions.
    pub preferred_present_mode: Option<wgpu::PresentMode>,
}
82
/// One render pipeline per primitive kind drawn by the renderer.
///
/// Rebuilt whenever the surface's alpha mode changes, since the blend state
/// is baked into each pipeline.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    // First path pass: rasterize path geometry into an intermediate texture.
    path_rasterization: wgpu::RenderPipeline,
    // Second path pass: composite the intermediate texture onto the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    // Only present when dual-source blending is supported by the device.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
95
/// Bind group layouts shared across all pipelines.
///
/// Group 0 is always `globals`; group 1 is one of the remaining layouts,
/// depending on whether the pipeline samples a texture.
struct WgpuBindGroupLayouts {
    // Frame globals + gamma parameters (two uniform bindings).
    globals: wgpu::BindGroupLayout,
    // A single read-only storage buffer of per-primitive instances.
    instances: wgpu::BindGroupLayout,
    // Instance storage buffer plus a texture and sampler (sprites, paths).
    instances_with_texture: wgpu::BindGroupLayout,
    // Surface uniforms plus two textures and a sampler.
    surfaces: wgpu::BindGroupLayout,
}
102
/// Shared GPU context reference, used to coordinate device recovery across multiple windows.
/// `None` inside the cell means no window has initialized the context yet (or it was torn down).
pub type GpuContext = Rc<RefCell<Option<WgpuContext>>>;
105
/// GPU resources that must be dropped together during device recovery.
struct WgpuResources {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    // Linear-filtering sampler used for all atlas texture reads.
    atlas_sampler: wgpu::Sampler,
    // Single uniform buffer holding: frame globals at offset 0, path-pass
    // globals at `path_globals_offset`, gamma params at `gamma_offset`.
    globals_buffer: wgpu::Buffer,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    // Growable read-only storage buffer for per-primitive instance data.
    instance_buffer: wgpu::Buffer,
    // Intermediate render targets for path rendering; created lazily on the
    // first draw and invalidated on resize (see `ensure_intermediate_textures`).
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    // MSAA color target for path rasterization; only present when the
    // configured path sample count is > 1.
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
}
123
/// wgpu-backed renderer for a single window's GPUI scene.
pub struct WgpuRenderer {
    /// Shared GPU context for device recovery coordination (unused on WASM).
    #[allow(dead_code)]
    context: Option<GpuContext>,
    /// Compositor GPU hint for adapter selection (unused on WASM).
    #[allow(dead_code)]
    compositor_gpu: Option<CompositorGpuHint>,
    // `None` while GPU resources are torn down (e.g. during device recovery).
    resources: Option<WgpuResources>,
    // Current swapchain configuration (size, format, present/alpha modes).
    surface_config: wgpu::SurfaceConfiguration,
    atlas: Arc<WgpuAtlas>,
    // Byte offset of the path-pass globals within the globals buffer.
    path_globals_offset: u64,
    // Byte offset of the gamma parameters within the globals buffer.
    gamma_offset: u64,
    // Current size, in bytes, of the instance storage buffer.
    instance_buffer_capacity: u64,
    // Device limit: maximum size of a single buffer.
    max_buffer_size: u64,
    // Device limit: required alignment for storage buffer offsets.
    storage_buffer_alignment: u64,
    rendering_params: RenderingParameters,
    // Whether the subpixel text pipeline (dual-source blending) is available.
    dual_source_blending: bool,
    // Adapter info captured at startup; reported via `gpu_specs()`.
    adapter_info: wgpu::AdapterInfo,
    // Composite alpha modes chosen at startup for transparent/opaque windows.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    // Device limit: maximum 2D texture dimension; also clamps surface size.
    max_texture_size: u32,
    // Most recent uncaptured device error, recorded by the error callback
    // installed in `new_internal` and inspected at the start of `draw`.
    last_error: Arc<Mutex<Option<String>>>,
    // Consecutive frames that observed a GPU error.
    failed_frame_count: u32,
    // Flag shared with the context; presumably set when device loss is
    // detected (see `WgpuContext::device_lost_flag`).
    device_lost: std::sync::Arc<std::sync::atomic::AtomicBool>,
    // False while the surface is unconfigured (e.g. app backgrounded);
    // `draw` bails out early in that state.
    surface_configured: bool,
}
150
151impl WgpuRenderer {
152 fn resources(&self) -> &WgpuResources {
153 self.resources
154 .as_ref()
155 .expect("GPU resources not available")
156 }
157
158 fn resources_mut(&mut self) -> &mut WgpuResources {
159 self.resources
160 .as_mut()
161 .expect("GPU resources not available")
162 }
163
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// The `gpu_context` is a shared reference that coordinates GPU context across
    /// multiple windows. The first window to create a renderer will initialize the
    /// context; subsequent windows will share it.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    #[cfg(not(target_family = "wasm"))]
    pub fn new<W>(
        gpu_context: GpuContext,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            // Fall back to the display handle already provided via InstanceDescriptor::display.
            raw_display_handle: None,
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        let instance = gpu_context
            .borrow()
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(|| WgpuContext::instance(Box::new(window.clone())));

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        // First window initializes the shared context; later windows verify
        // the existing context can drive this surface.
        let mut ctx_ref = gpu_context.borrow_mut();
        let context = match ctx_ref.as_mut() {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => ctx_ref.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        let atlas = Arc::new(WgpuAtlas::new(
            Arc::clone(&context.device),
            Arc::clone(&context.queue),
        ));

        Self::new_internal(
            Some(Rc::clone(&gpu_context)),
            context,
            surface,
            config,
            compositor_gpu,
            atlas,
        )
    }
234
235 #[cfg(target_family = "wasm")]
236 pub fn new_from_canvas(
237 context: &WgpuContext,
238 canvas: &web_sys::HtmlCanvasElement,
239 config: WgpuSurfaceConfig,
240 ) -> anyhow::Result<Self> {
241 let surface = context
242 .instance
243 .create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))
244 .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?;
245
246 let atlas = Arc::new(WgpuAtlas::new(
247 Arc::clone(&context.device),
248 Arc::clone(&context.queue),
249 ));
250
251 Self::new_internal(None, context, surface, config, None, atlas)
252 }
253
    /// Shared construction path for native and WASM renderers.
    ///
    /// Chooses a surface format and alpha modes, configures the surface,
    /// builds all pipelines and bind group layouts, allocates the globals
    /// and instance buffers, and installs the uncaptured-error callback.
    fn new_internal(
        gpu_context: Option<GpuContext>,
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
        atlas: Arc<WgpuAtlas>,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer non-sRGB 8-bit formats; fall back to any non-sRGB format,
        // then to whatever the surface offers first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first supported alpha mode from an ordered preference
        // list, falling back to whatever the surface supports.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to the device's texture limit; a larger
        // surface would fail to configure.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                 Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            // Use the preferred present mode only when the surface supports
            // it; `Fifo` (VSync) is guaranteed to be available.
            present_mode: config
                .preferred_present_mode
                .filter(|mode| surface_caps.present_modes.contains(mode))
                .unwrap_or(wgpu::PresentMode::Fifo),
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // The globals buffer packs three uniform regions, each aligned to the
        // device's uniform offset requirement:
        //   [GlobalParams | GlobalParams (path pass) | GammaParams]
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let max_buffer_size = device.limits().max_buffer_size;
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        // Initial capacity for per-primitive instance data; grown later as needed.
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Two bind groups over the same buffer: the main pass reads globals
        // at offset 0; the path pass reads its own globals at
        // `path_globals_offset`. Both share the gamma region.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured device errors; `draw` inspects this each frame
        // and aborts after too many consecutive failures.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        let resources = WgpuResources {
            device,
            queue,
            surface,
            pipelines,
            bind_group_layouts,
            atlas_sampler,
            globals_buffer,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
        };

        Ok(Self {
            context: gpu_context,
            compositor_gpu,
            resources: Some(resources),
            surface_config,
            atlas,
            path_globals_offset,
            gamma_offset,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            max_buffer_size,
            storage_buffer_alignment,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
            device_lost: context.device_lost_flag(),
            surface_configured: true,
        })
    }
485
486 fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
487 let globals =
488 device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
489 label: Some("globals_layout"),
490 entries: &[
491 wgpu::BindGroupLayoutEntry {
492 binding: 0,
493 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
494 ty: wgpu::BindingType::Buffer {
495 ty: wgpu::BufferBindingType::Uniform,
496 has_dynamic_offset: false,
497 min_binding_size: NonZeroU64::new(
498 std::mem::size_of::<GlobalParams>() as u64
499 ),
500 },
501 count: None,
502 },
503 wgpu::BindGroupLayoutEntry {
504 binding: 1,
505 visibility: wgpu::ShaderStages::FRAGMENT,
506 ty: wgpu::BindingType::Buffer {
507 ty: wgpu::BufferBindingType::Uniform,
508 has_dynamic_offset: false,
509 min_binding_size: NonZeroU64::new(
510 std::mem::size_of::<GammaParams>() as u64
511 ),
512 },
513 count: None,
514 },
515 ],
516 });
517
518 let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
519 binding,
520 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
521 ty: wgpu::BindingType::Buffer {
522 ty: wgpu::BufferBindingType::Storage { read_only: true },
523 has_dynamic_offset: false,
524 min_binding_size: None,
525 },
526 count: None,
527 };
528
529 let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
530 label: Some("instances_layout"),
531 entries: &[storage_buffer_entry(0)],
532 });
533
534 let instances_with_texture =
535 device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
536 label: Some("instances_with_texture_layout"),
537 entries: &[
538 storage_buffer_entry(0),
539 wgpu::BindGroupLayoutEntry {
540 binding: 1,
541 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
542 ty: wgpu::BindingType::Texture {
543 sample_type: wgpu::TextureSampleType::Float { filterable: true },
544 view_dimension: wgpu::TextureViewDimension::D2,
545 multisampled: false,
546 },
547 count: None,
548 },
549 wgpu::BindGroupLayoutEntry {
550 binding: 2,
551 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
552 ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
553 count: None,
554 },
555 ],
556 });
557
558 let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
559 label: Some("surfaces_layout"),
560 entries: &[
561 wgpu::BindGroupLayoutEntry {
562 binding: 0,
563 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
564 ty: wgpu::BindingType::Buffer {
565 ty: wgpu::BufferBindingType::Uniform,
566 has_dynamic_offset: false,
567 min_binding_size: NonZeroU64::new(
568 std::mem::size_of::<SurfaceParams>() as u64
569 ),
570 },
571 count: None,
572 },
573 wgpu::BindGroupLayoutEntry {
574 binding: 1,
575 visibility: wgpu::ShaderStages::FRAGMENT,
576 ty: wgpu::BindingType::Texture {
577 sample_type: wgpu::TextureSampleType::Float { filterable: true },
578 view_dimension: wgpu::TextureViewDimension::D2,
579 multisampled: false,
580 },
581 count: None,
582 },
583 wgpu::BindGroupLayoutEntry {
584 binding: 2,
585 visibility: wgpu::ShaderStages::FRAGMENT,
586 ty: wgpu::BindingType::Texture {
587 sample_type: wgpu::TextureSampleType::Float { filterable: true },
588 view_dimension: wgpu::TextureViewDimension::D2,
589 multisampled: false,
590 },
591 count: None,
592 },
593 wgpu::BindGroupLayoutEntry {
594 binding: 3,
595 visibility: wgpu::ShaderStages::FRAGMENT,
596 ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
597 count: None,
598 },
599 ],
600 });
601
602 WgpuBindGroupLayouts {
603 globals,
604 instances,
605 instances_with_texture,
606 surfaces,
607 }
608 }
609
    /// Builds every render pipeline used by the renderer.
    ///
    /// `alpha_mode` selects the blend state baked into the standard
    /// pipelines (so pipelines must be rebuilt when it changes),
    /// `path_sample_count` sets MSAA for the path-rasterization pass, and
    /// the subpixel text pipeline is built only when dual-source blending
    /// is actually available on the device.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        // Diagnostic guard: verify the device actually has
        // DUAL_SOURCE_BLENDING. We have a crash report (ZED-5G1) where a
        // feature mismatch caused a wgpu-hal abort, but we haven't
        // identified the code path that produces the mismatch. This
        // guard prevents the crash and logs more evidence.
        // Remove this check once:
        // a) We find and fix the root cause, or
        // b) There are no reports of this warning appearing for some time.
        let device_has_feature = device
            .features()
            .contains(wgpu::Features::DUAL_SOURCE_BLENDING);
        if dual_source_blending && !device_has_feature {
            log::error!(
                "BUG: dual_source_blending flag is true but device does not \
                 have DUAL_SOURCE_BLENDING enabled (device features: {:?}). \
                 Falling back to mono text rendering. Please report this at \
                 https://github.com/zed-industries/zed/issues",
                device.features(),
            );
        }
        let dual_source_blending = dual_source_blending && device_has_feature;

        let base_shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(base_shader_source)),
        });

        // The subpixel shader needs the dual_source_blending WGSL extension,
        // so it lives in a separate module that is only compiled when the
        // feature is usable.
        let subpixel_shader_source = include_str!("shaders_subpixel.wgsl");
        let subpixel_shader_module = if dual_source_blending {
            let combined = format!(
                "enable dual_source_blending;\n{base_shader_source}\n{subpixel_shader_source}"
            );
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("gpui_subpixel_shaders"),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Owned(combined)),
            }))
        } else {
            None
        };

        // Match the blend math to how the compositor interprets our alpha.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Shared pipeline constructor: group 0 = globals, group 1 = the
        // pipeline's data layout; no vertex buffers (data comes from the
        // instance storage buffer).
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32,
                               module: &wgpu::ShaderModule| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[Some(globals_layout), Some(data_layout)],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // First path pass: renders (possibly multisampled) path coverage into
        // the intermediate texture, always premultiplied.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
            &shader_module,
        );

        // Second path pass: composites the premultiplied intermediate
        // texture onto the frame.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
            &shader_module,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Subpixel text uses dual-source blending: the second shader output
        // (Src1) supplies a per-channel coverage mask.
        let subpixel_sprites = if let Some(subpixel_module) = &subpixel_shader_module {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
                subpixel_module,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
            &shader_module,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
884
885 fn create_path_intermediate(
886 device: &wgpu::Device,
887 format: wgpu::TextureFormat,
888 width: u32,
889 height: u32,
890 ) -> (wgpu::Texture, wgpu::TextureView) {
891 let texture = device.create_texture(&wgpu::TextureDescriptor {
892 label: Some("path_intermediate"),
893 size: wgpu::Extent3d {
894 width: width.max(1),
895 height: height.max(1),
896 depth_or_array_layers: 1,
897 },
898 mip_level_count: 1,
899 sample_count: 1,
900 dimension: wgpu::TextureDimension::D2,
901 format,
902 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
903 view_formats: &[],
904 });
905 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
906 (texture, view)
907 }
908
909 fn create_msaa_if_needed(
910 device: &wgpu::Device,
911 format: wgpu::TextureFormat,
912 width: u32,
913 height: u32,
914 sample_count: u32,
915 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
916 if sample_count <= 1 {
917 return None;
918 }
919 let texture = device.create_texture(&wgpu::TextureDescriptor {
920 label: Some("path_msaa"),
921 size: wgpu::Extent3d {
922 width: width.max(1),
923 height: height.max(1),
924 depth_or_array_layers: 1,
925 },
926 mip_level_count: 1,
927 sample_count,
928 dimension: wgpu::TextureDimension::D2,
929 format,
930 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
931 view_formats: &[],
932 });
933 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
934 Some((texture, view))
935 }
936
    /// Resizes the swapchain to `size`, clamped to the device's maximum
    /// texture dimension. No-op when the size is unchanged.
    ///
    /// The ordering here is deliberate: wait for in-flight GPU work, destroy
    /// the old path textures, reconfigure the surface, then invalidate the
    /// texture slots so they are lazily recreated on the next draw.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                     Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            let surface_config = self.surface_config.clone();

            let resources = self.resources_mut();

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = resources.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = resources.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = resources.path_msaa_texture {
                texture.destroy();
            }

            resources
                .surface
                .configure(&resources.device, &surface_config);

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            resources.path_intermediate_texture = None;
            resources.path_intermediate_view = None;
            resources.path_msaa_texture = None;
            resources.path_msaa_view = None;
        }
    }
988
989 fn ensure_intermediate_textures(&mut self) {
990 if self.resources().path_intermediate_texture.is_some() {
991 return;
992 }
993
994 let format = self.surface_config.format;
995 let width = self.surface_config.width;
996 let height = self.surface_config.height;
997 let path_sample_count = self.rendering_params.path_sample_count;
998 let resources = self.resources_mut();
999
1000 let (t, v) = Self::create_path_intermediate(&resources.device, format, width, height);
1001 resources.path_intermediate_texture = Some(t);
1002 resources.path_intermediate_view = Some(v);
1003
1004 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
1005 &resources.device,
1006 format,
1007 width,
1008 height,
1009 path_sample_count,
1010 )
1011 .map(|(t, v)| (Some(t), Some(v)))
1012 .unwrap_or((None, None));
1013 resources.path_msaa_texture = path_msaa_texture;
1014 resources.path_msaa_view = path_msaa_view;
1015 }
1016
1017 pub fn update_transparency(&mut self, transparent: bool) {
1018 let new_alpha_mode = if transparent {
1019 self.transparent_alpha_mode
1020 } else {
1021 self.opaque_alpha_mode
1022 };
1023
1024 if new_alpha_mode != self.surface_config.alpha_mode {
1025 self.surface_config.alpha_mode = new_alpha_mode;
1026 let surface_config = self.surface_config.clone();
1027 let path_sample_count = self.rendering_params.path_sample_count;
1028 let dual_source_blending = self.dual_source_blending;
1029 let resources = self.resources_mut();
1030 resources
1031 .surface
1032 .configure(&resources.device, &surface_config);
1033 resources.pipelines = Self::create_pipelines(
1034 &resources.device,
1035 &resources.bind_group_layouts,
1036 surface_config.format,
1037 surface_config.alpha_mode,
1038 path_sample_count,
1039 dual_source_blending,
1040 );
1041 }
1042 }
1043
1044 #[allow(dead_code)]
1045 pub fn viewport_size(&self) -> Size<DevicePixels> {
1046 Size {
1047 width: DevicePixels(self.surface_config.width as i32),
1048 height: DevicePixels(self.surface_config.height as i32),
1049 }
1050 }
1051
1052 pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
1053 &self.atlas
1054 }
1055
1056 pub fn supports_dual_source_blending(&self) -> bool {
1057 self.dual_source_blending
1058 }
1059
1060 pub fn gpu_specs(&self) -> GpuSpecs {
1061 GpuSpecs {
1062 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
1063 device_name: self.adapter_info.name.clone(),
1064 driver_name: self.adapter_info.driver.clone(),
1065 driver_info: self.adapter_info.driver_info.clone(),
1066 }
1067 }
1068
1069 pub fn max_texture_size(&self) -> u32 {
1070 self.max_texture_size
1071 }
1072
    /// Renders one full frame for `scene` onto the current surface texture.
    ///
    /// Handles surface-acquisition failures (reconfiguring or skipping the
    /// frame as appropriate), and retries the whole frame with a larger
    /// instance buffer when any batch overflows it.
    pub fn draw(&mut self, scene: &Scene) {
        // Bail out early if the surface has been unconfigured (e.g. during
        // Android background/rotation transitions). Attempting to acquire
        // a texture from an unconfigured surface can block indefinitely on
        // some drivers (Adreno).
        if !self.surface_configured {
            return;
        }

        // GPU errors are reported asynchronously into `last_error`; after
        // too many consecutive failed frames, give up with a panic.
        let last_error = self.last_error.lock().unwrap().take();
        if let Some(error) = last_error {
            self.failed_frame_count += 1;
            log::error!(
                "GPU error during frame (failure {} of 20): {error}",
                self.failed_frame_count
            );
            if self.failed_frame_count > 20 {
                panic!("Too many consecutive GPU errors. Last error: {error}");
            }
        } else {
            // A clean frame resets the failure streak.
            self.failed_frame_count = 0;
        }

        self.atlas.before_frame();

        let frame = match self.resources().surface.get_current_texture() {
            wgpu::CurrentSurfaceTexture::Success(frame) => frame,
            wgpu::CurrentSurfaceTexture::Suboptimal(frame) => {
                // Textures must be destroyed before the surface can be reconfigured.
                drop(frame);
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                return;
            }
            wgpu::CurrentSurfaceTexture::Lost | wgpu::CurrentSurfaceTexture::Outdated => {
                // Reconfigure and skip this frame; the next draw retries.
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                return;
            }
            wgpu::CurrentSurfaceTexture::Timeout | wgpu::CurrentSurfaceTexture::Occluded => {
                // Transient conditions — simply skip this frame.
                return;
            }
            wgpu::CurrentSurfaceTexture::Validation => {
                // Record the error so the failure counter above advances next frame.
                *self.last_error.lock().unwrap() =
                    Some("Surface texture validation error".to_string());
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // Path rasterization always renders with straight (non-premultiplied)
        // alpha into the intermediate texture.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        {
            // Upload the three uniform blocks into the shared globals buffer
            // at their precomputed offsets.
            let resources = self.resources();
            resources.queue.write_buffer(
                &resources.globals_buffer,
                0,
                bytemuck::bytes_of(&globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.path_globals_offset,
                bytemuck::bytes_of(&path_globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.gamma_offset,
                bytemuck::bytes_of(&gamma_params),
            );
        }

        // Retry loop: if any batch fails to fit in the instance buffer, grow
        // the buffer and re-encode the entire frame from scratch.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder =
                self.resources()
                    .device
                    .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                        label: Some("main_encoder"),
                    });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw_* helper returns false on instance-buffer overflow.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths need their own rasterization pass, so the
                            // main pass must be ended first and resumed below.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            // Resume the main pass, loading (not clearing) the
                            // contents rendered so far.
                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // The encoder still references frame resources; drop it before retrying.
                drop(encoder);
                if self.instance_buffer_capacity >= self.max_buffer_size {
                    // Cannot grow any further — present whatever was drawn and give up.
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.resources()
                .queue
                .submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1317
1318 fn draw_quads(
1319 &self,
1320 quads: &[Quad],
1321 instance_offset: &mut u64,
1322 pass: &mut wgpu::RenderPass<'_>,
1323 ) -> bool {
1324 let data = unsafe { Self::instance_bytes(quads) };
1325 self.draw_instances(
1326 data,
1327 quads.len() as u32,
1328 &self.resources().pipelines.quads,
1329 instance_offset,
1330 pass,
1331 )
1332 }
1333
1334 fn draw_shadows(
1335 &self,
1336 shadows: &[Shadow],
1337 instance_offset: &mut u64,
1338 pass: &mut wgpu::RenderPass<'_>,
1339 ) -> bool {
1340 let data = unsafe { Self::instance_bytes(shadows) };
1341 self.draw_instances(
1342 data,
1343 shadows.len() as u32,
1344 &self.resources().pipelines.shadows,
1345 instance_offset,
1346 pass,
1347 )
1348 }
1349
1350 fn draw_underlines(
1351 &self,
1352 underlines: &[Underline],
1353 instance_offset: &mut u64,
1354 pass: &mut wgpu::RenderPass<'_>,
1355 ) -> bool {
1356 let data = unsafe { Self::instance_bytes(underlines) };
1357 self.draw_instances(
1358 data,
1359 underlines.len() as u32,
1360 &self.resources().pipelines.underlines,
1361 instance_offset,
1362 pass,
1363 )
1364 }
1365
1366 fn draw_monochrome_sprites(
1367 &self,
1368 sprites: &[MonochromeSprite],
1369 texture_id: AtlasTextureId,
1370 instance_offset: &mut u64,
1371 pass: &mut wgpu::RenderPass<'_>,
1372 ) -> bool {
1373 let tex_info = self.atlas.get_texture_info(texture_id);
1374 let data = unsafe { Self::instance_bytes(sprites) };
1375 self.draw_instances_with_texture(
1376 data,
1377 sprites.len() as u32,
1378 &tex_info.view,
1379 &self.resources().pipelines.mono_sprites,
1380 instance_offset,
1381 pass,
1382 )
1383 }
1384
1385 fn draw_subpixel_sprites(
1386 &self,
1387 sprites: &[SubpixelSprite],
1388 texture_id: AtlasTextureId,
1389 instance_offset: &mut u64,
1390 pass: &mut wgpu::RenderPass<'_>,
1391 ) -> bool {
1392 let tex_info = self.atlas.get_texture_info(texture_id);
1393 let data = unsafe { Self::instance_bytes(sprites) };
1394 let resources = self.resources();
1395 let pipeline = resources
1396 .pipelines
1397 .subpixel_sprites
1398 .as_ref()
1399 .unwrap_or(&resources.pipelines.mono_sprites);
1400 self.draw_instances_with_texture(
1401 data,
1402 sprites.len() as u32,
1403 &tex_info.view,
1404 pipeline,
1405 instance_offset,
1406 pass,
1407 )
1408 }
1409
1410 fn draw_polychrome_sprites(
1411 &self,
1412 sprites: &[PolychromeSprite],
1413 texture_id: AtlasTextureId,
1414 instance_offset: &mut u64,
1415 pass: &mut wgpu::RenderPass<'_>,
1416 ) -> bool {
1417 let tex_info = self.atlas.get_texture_info(texture_id);
1418 let data = unsafe { Self::instance_bytes(sprites) };
1419 self.draw_instances_with_texture(
1420 data,
1421 sprites.len() as u32,
1422 &tex_info.view,
1423 &self.resources().pipelines.poly_sprites,
1424 instance_offset,
1425 pass,
1426 )
1427 }
1428
1429 fn draw_instances(
1430 &self,
1431 data: &[u8],
1432 instance_count: u32,
1433 pipeline: &wgpu::RenderPipeline,
1434 instance_offset: &mut u64,
1435 pass: &mut wgpu::RenderPass<'_>,
1436 ) -> bool {
1437 if instance_count == 0 {
1438 return true;
1439 }
1440 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1441 return false;
1442 };
1443 let resources = self.resources();
1444 let bind_group = resources
1445 .device
1446 .create_bind_group(&wgpu::BindGroupDescriptor {
1447 label: None,
1448 layout: &resources.bind_group_layouts.instances,
1449 entries: &[wgpu::BindGroupEntry {
1450 binding: 0,
1451 resource: self.instance_binding(offset, size),
1452 }],
1453 });
1454 pass.set_pipeline(pipeline);
1455 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1456 pass.set_bind_group(1, &bind_group, &[]);
1457 pass.draw(0..4, 0..instance_count);
1458 true
1459 }
1460
1461 fn draw_instances_with_texture(
1462 &self,
1463 data: &[u8],
1464 instance_count: u32,
1465 texture_view: &wgpu::TextureView,
1466 pipeline: &wgpu::RenderPipeline,
1467 instance_offset: &mut u64,
1468 pass: &mut wgpu::RenderPass<'_>,
1469 ) -> bool {
1470 if instance_count == 0 {
1471 return true;
1472 }
1473 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1474 return false;
1475 };
1476 let resources = self.resources();
1477 let bind_group = resources
1478 .device
1479 .create_bind_group(&wgpu::BindGroupDescriptor {
1480 label: None,
1481 layout: &resources.bind_group_layouts.instances_with_texture,
1482 entries: &[
1483 wgpu::BindGroupEntry {
1484 binding: 0,
1485 resource: self.instance_binding(offset, size),
1486 },
1487 wgpu::BindGroupEntry {
1488 binding: 1,
1489 resource: wgpu::BindingResource::TextureView(texture_view),
1490 },
1491 wgpu::BindGroupEntry {
1492 binding: 2,
1493 resource: wgpu::BindingResource::Sampler(&resources.atlas_sampler),
1494 },
1495 ],
1496 });
1497 pass.set_pipeline(pipeline);
1498 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1499 pass.set_bind_group(1, &bind_group, &[]);
1500 pass.draw(0..4, 0..instance_count);
1501 true
1502 }
1503
    /// Reinterprets a slice of instance structs as raw bytes for GPU upload.
    ///
    /// # Safety
    ///
    /// The bytes of `T` must be safe to read as plain data — callers should
    /// only pass `#[repr(C)]` plain-old-data instance types (all current
    /// callers do). The returned slice borrows `instances`.
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        unsafe {
            // SAFETY: the pointer and byte length come from a valid slice;
            // size_of_val yields exactly the slice's size in bytes.
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1512
    /// Composites previously rasterized paths from the intermediate texture
    /// onto the main render target. Returns false on instance-buffer overflow.
    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
        pass: &mut wgpu::RenderPass<'_>,
    ) -> bool {
        // Callers guarantee `paths` is non-empty (draw() skips empty batches),
        // so indexing the first element is safe.
        let first_path = &paths[0];
        // If the first and last paths share the same order value (batches are
        // presumably sorted by order — TODO(review): confirm), emit one sprite
        // per path; otherwise conservatively emit a single sprite covering the
        // union of all clipped bounds.
        let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
        {
            paths
                .iter()
                .map(|p| PathSprite {
                    bounds: p.clipped_bounds(),
                })
                .collect()
        } else {
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            vec![PathSprite { bounds }]
        };

        let resources = self.resources();
        // With no intermediate texture there is nothing to composite; report
        // success rather than triggering the overflow/retry machinery.
        let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
            return true;
        };

        let sprite_data = unsafe { Self::instance_bytes(&sprites) };
        self.draw_instances_with_texture(
            sprite_data,
            sprites.len() as u32,
            path_intermediate_view,
            &resources.pipelines.paths,
            instance_offset,
            pass,
        )
    }
1551
    /// Rasterizes `paths` into the intermediate texture in a dedicated render
    /// pass on `encoder`, optionally through the MSAA attachment with a
    /// resolve. Returns false only when the vertex data does not fit in the
    /// instance buffer.
    fn draw_paths_to_intermediate(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
    ) -> bool {
        // Flatten every path's vertices into one vertex list, tagging each
        // vertex with its path's color and clipped bounds.
        let mut vertices = Vec::new();
        for path in paths {
            let bounds = path.clipped_bounds();
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds,
            }));
        }

        // Nothing to rasterize — successful no-op.
        if vertices.is_empty() {
            return true;
        }

        let vertex_data = unsafe { Self::instance_bytes(&vertices) };
        let Some((vertex_offset, vertex_size)) =
            self.write_to_instance_buffer(instance_offset, vertex_data)
        else {
            // Overflow: the caller will grow the instance buffer and retry.
            return false;
        };

        let resources = self.resources();
        let data_bind_group = resources
            .device
            .create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("path_rasterization_bind_group"),
                layout: &resources.bind_group_layouts.instances,
                entries: &[wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.instance_binding(vertex_offset, vertex_size),
                }],
            });

        // Without an intermediate texture there is nowhere to rasterize;
        // treat as a successful no-op.
        let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
            return true;
        };

        // When multisampling is enabled, render into the MSAA target and
        // resolve into the intermediate; otherwise render directly.
        let (target_view, resolve_target) = if let Some(ref msaa_view) = resources.path_msaa_view {
            (msaa_view, Some(path_intermediate_view))
        } else {
            (path_intermediate_view, None)
        };

        {
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("path_rasterization_pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target_view,
                    resolve_target,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                        store: wgpu::StoreOp::Store,
                    },
                    depth_slice: None,
                })],
                depth_stencil_attachment: None,
                ..Default::default()
            });

            pass.set_pipeline(&resources.pipelines.path_rasterization);
            // Paths use the non-premultiplied globals block (see draw()).
            pass.set_bind_group(0, &resources.path_globals_bind_group, &[]);
            pass.set_bind_group(1, &data_bind_group, &[]);
            pass.draw(0..vertices.len() as u32, 0..1);
        }

        true
    }
1626
1627 fn grow_instance_buffer(&mut self) {
1628 let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
1629 log::info!("increased instance buffer size to {}", new_capacity);
1630 let resources = self.resources_mut();
1631 resources.instance_buffer = resources.device.create_buffer(&wgpu::BufferDescriptor {
1632 label: Some("instance_buffer"),
1633 size: new_capacity,
1634 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1635 mapped_at_creation: false,
1636 });
1637 self.instance_buffer_capacity = new_capacity;
1638 }
1639
1640 fn write_to_instance_buffer(
1641 &self,
1642 instance_offset: &mut u64,
1643 data: &[u8],
1644 ) -> Option<(u64, NonZeroU64)> {
1645 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1646 let size = (data.len() as u64).max(16);
1647 if offset + size > self.instance_buffer_capacity {
1648 return None;
1649 }
1650 let resources = self.resources();
1651 resources
1652 .queue
1653 .write_buffer(&resources.instance_buffer, offset, data);
1654 *instance_offset = offset + size;
1655 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1656 }
1657
1658 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1659 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1660 buffer: &self.resources().instance_buffer,
1661 offset,
1662 size: Some(size),
1663 })
1664 }
1665
1666 /// Mark the surface as unconfigured so rendering is skipped until a new
1667 /// surface is provided via [`replace_surface`](Self::replace_surface).
1668 ///
1669 /// This does **not** drop the renderer — the device, queue, atlas, and
1670 /// pipelines stay alive. Use this when the native window is destroyed
1671 /// (e.g. Android `TerminateWindow`) but you intend to re-create the
1672 /// surface later without losing cached atlas textures.
1673 pub fn unconfigure_surface(&mut self) {
1674 self.surface_configured = false;
1675 // Drop intermediate textures since they reference the old surface size.
1676 if let Some(res) = self.resources.as_mut() {
1677 res.path_intermediate_texture = None;
1678 res.path_intermediate_view = None;
1679 res.path_msaa_texture = None;
1680 res.path_msaa_view = None;
1681 }
1682 }
1683
    /// Replace the wgpu surface with a new one (e.g. after Android destroys
    /// and recreates the native window). Keeps the device, queue, atlas, and
    /// all pipelines intact so cached `AtlasTextureId`s remain valid.
    ///
    /// The `instance` **must** be the same [`wgpu::Instance`] that was used to
    /// create the adapter and device (i.e. from the [`WgpuContext`]). Using a
    /// different instance will cause a "Device does not exist" panic because
    /// the wgpu device is bound to its originating instance.
    #[cfg(not(target_family = "wasm"))]
    pub fn replace_surface<W: HasWindowHandle>(
        &mut self,
        window: &W,
        config: WgpuSurfaceConfig,
        instance: &wgpu::Instance,
    ) -> anyhow::Result<()> {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let surface = create_surface(instance, window_handle.as_raw())?;

        // Clamp to 1x1 — zero-sized surfaces are invalid to configure.
        let width = (config.size.width.0 as u32).max(1);
        let height = (config.size.height.0 as u32).max(1);

        let alpha_mode = if config.transparent {
            self.transparent_alpha_mode
        } else {
            self.opaque_alpha_mode
        };

        self.surface_config.width = width;
        self.surface_config.height = height;
        self.surface_config.alpha_mode = alpha_mode;
        if let Some(mode) = config.preferred_present_mode {
            self.surface_config.present_mode = mode;
        }

        {
            let res = self
                .resources
                .as_mut()
                .expect("GPU resources not available");
            // Configure the new surface before swapping it in.
            surface.configure(&res.device, &self.surface_config);
            res.surface = surface;

            // Invalidate intermediate textures — they'll be recreated lazily.
            res.path_intermediate_texture = None;
            res.path_intermediate_view = None;
            res.path_msaa_texture = None;
            res.path_msaa_view = None;
        }

        // Re-enable rendering (see unconfigure_surface / draw).
        self.surface_configured = true;

        Ok(())
    }
1740
1741 pub fn destroy(&mut self) {
1742 // Release surface-bound GPU resources eagerly so the underlying native
1743 // window can be destroyed before the renderer itself is dropped.
1744 self.resources.take();
1745 }
1746
1747 /// Returns true if the GPU device was lost and recovery is needed.
1748 pub fn device_lost(&self) -> bool {
1749 self.device_lost.load(std::sync::atomic::Ordering::SeqCst)
1750 }
1751
    /// Recovers from a lost GPU device by recreating the renderer with a new context.
    ///
    /// Call this after detecting `device_lost()` returns true.
    ///
    /// This method coordinates recovery across multiple windows:
    /// - The first window to call this will recreate the shared context
    /// - Subsequent windows will adopt the already-recovered context
    #[cfg(not(target_family = "wasm"))]
    pub fn recover<W>(&mut self, window: &W) -> anyhow::Result<()>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let gpu_context = self.context.as_ref().expect("recover requires gpu_context");

        // Check if another window already recovered the context
        let needs_new_context = gpu_context
            .borrow()
            .as_ref()
            .is_none_or(|ctx| ctx.device_lost());

        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let surface = if needs_new_context {
            log::warn!("GPU device lost, recreating context...");

            // Drop old resources to release Arc<Device>/Arc<Queue> and GPU resources
            self.resources = None;
            *gpu_context.borrow_mut() = None;

            // Wait for GPU driver to stabilize (350ms copied from windows :shrug:)
            std::thread::sleep(std::time::Duration::from_millis(350));

            let instance = WgpuContext::instance(Box::new(window.clone()));
            let surface = create_surface(&instance, window_handle.as_raw())?;
            let new_context = WgpuContext::new(instance, &surface, self.compositor_gpu)?;
            *gpu_context.borrow_mut() = Some(new_context);
            surface
        } else {
            // Another window already rebuilt the context — just create a new
            // surface on the existing instance.
            let ctx_ref = gpu_context.borrow();
            let instance = &ctx_ref.as_ref().unwrap().instance;
            create_surface(instance, window_handle.as_raw())?
        };

        // Rebuild the renderer with the previous dimensions, transparency,
        // and present-mode settings.
        let config = WgpuSurfaceConfig {
            size: gpui::Size {
                width: gpui::DevicePixels(self.surface_config.width as i32),
                height: gpui::DevicePixels(self.surface_config.height as i32),
            },
            transparent: self.surface_config.alpha_mode != wgpu::CompositeAlphaMode::Opaque,
            preferred_present_mode: Some(self.surface_config.present_mode),
        };
        let gpu_context = Rc::clone(gpu_context);
        let ctx_ref = gpu_context.borrow();
        let context = ctx_ref.as_ref().expect("context should exist");

        // Point the shared atlas at the new device/queue before rebuilding,
        // so cached atlas texture ids remain valid across recovery.
        self.resources = None;
        self.atlas
            .handle_device_lost(Arc::clone(&context.device), Arc::clone(&context.queue));

        *self = Self::new_internal(
            Some(gpu_context.clone()),
            context,
            surface,
            config,
            self.compositor_gpu,
            self.atlas.clone(),
        )?;

        log::info!("GPU recovery complete");
        Ok(())
    }
1825}
1826
1827#[cfg(not(target_family = "wasm"))]
1828fn create_surface(
1829 instance: &wgpu::Instance,
1830 raw_window_handle: raw_window_handle::RawWindowHandle,
1831) -> anyhow::Result<wgpu::Surface<'static>> {
1832 unsafe {
1833 instance
1834 .create_surface_unsafe(wgpu::SurfaceTargetUnsafe::RawHandle {
1835 // Fall back to the display handle already provided via InstanceDescriptor::display.
1836 raw_display_handle: None,
1837 raw_window_handle,
1838 })
1839 .map_err(|e| anyhow::anyhow!("{e}"))
1840 }
1841}
1842
/// Rendering tunables resolved once at renderer creation (see
/// `RenderingParameters::new`).
struct RenderingParameters {
    // MSAA sample count for path rasterization: 4, 2, or 1, whichever the
    // surface format supports.
    path_sample_count: u32,
    // Per-channel gamma-correction ratios derived from the gamma value.
    gamma_ratios: [f32; 4],
    // Extra contrast for grayscale-antialiased text (>= 0).
    grayscale_enhanced_contrast: f32,
    // Extra contrast for subpixel-antialiased text (>= 0).
    subpixel_enhanced_contrast: f32,
}
1849
1850impl RenderingParameters {
1851 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1852 use std::env;
1853
1854 let format_features = adapter.get_texture_format_features(surface_format);
1855 let path_sample_count = [4, 2, 1]
1856 .into_iter()
1857 .find(|&n| format_features.flags.sample_count_supported(n))
1858 .unwrap_or(1);
1859
1860 let gamma = env::var("ZED_FONTS_GAMMA")
1861 .ok()
1862 .and_then(|v| v.parse().ok())
1863 .unwrap_or(1.8_f32)
1864 .clamp(1.0, 2.2);
1865 let gamma_ratios = get_gamma_correction_ratios(gamma);
1866
1867 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1868 .ok()
1869 .and_then(|v| v.parse().ok())
1870 .unwrap_or(1.0_f32)
1871 .max(0.0);
1872
1873 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1874 .ok()
1875 .and_then(|v| v.parse().ok())
1876 .unwrap_or(0.5_f32)
1877 .max(0.0);
1878
1879 Self {
1880 path_sample_count,
1881 gamma_ratios,
1882 grayscale_enhanced_contrast,
1883 subpixel_enhanced_contrast,
1884 }
1885 }
1886}