1use crate::{CompositorGpuHint, WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use log::warn;
9#[cfg(not(target_family = "wasm"))]
10use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
11use std::cell::RefCell;
12use std::num::NonZeroU64;
13use std::rc::Rc;
14use std::sync::{Arc, Mutex};
15
/// Per-frame uniform parameters uploaded to the GPU (see `globals_buffer`).
///
/// `#[repr(C)]` + `Pod`/`Zeroable` allow the struct to be byte-copied into a
/// uniform buffer with `bytemuck`. Presumably mirrors a matching WGSL struct
/// in `shaders.wgsl` — confirm field order/padding against the shader.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    /// Viewport size in pixels (width, height); written from `surface_config` in `draw`.
    viewport_size: [f32; 2],
    /// 1 when the surface alpha mode is `PreMultiplied`, 0 otherwise.
    premultiplied_alpha: u32,
    /// Explicit padding to keep the struct's size/alignment uniform-friendly.
    pad: u32,
}
23
/// A plain-old-data rectangle (origin + size as raw `f32` pairs), suitable for
/// direct upload into GPU buffers via `bytemuck`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    /// Top-left corner as `[x, y]`.
    origin: [f32; 2],
    /// Extent as `[width, height]`.
    size: [f32; 2],
}
30
31impl From<Bounds<ScaledPixels>> for PodBounds {
32 fn from(bounds: Bounds<ScaledPixels>) -> Self {
33 Self {
34 origin: [bounds.origin.x.0, bounds.origin.y.0],
35 size: [bounds.size.width.0, bounds.size.height.0],
36 }
37 }
38}
39
/// Per-surface uniform parameters for the `surfaces` pipeline
/// (bound via the `surfaces` bind group layout, binding 0).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    /// Where the surface is drawn, in scaled pixels.
    bounds: PodBounds,
    /// Clipping rectangle applied to the surface.
    content_mask: PodBounds,
}
46
/// Gamma/contrast parameters for text rendering, uploaded at `gamma_offset`
/// inside `globals_buffer` (binding 1 of the globals bind group).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    /// Gamma correction ratios; filled from `RenderingParameters::gamma_ratios`
    /// (see `get_gamma_correction_ratios` import).
    gamma_ratios: [f32; 4],
    /// Contrast enhancement applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    /// Contrast enhancement applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    /// Padding to a 16-byte multiple for uniform buffer layout.
    _pad: [f32; 2],
}
55
/// Instance data for drawing one rasterized path sprite to the frame.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    /// Destination rectangle of the sprite in scaled pixels.
    bounds: Bounds<ScaledPixels>,
}
61
/// Vertex data consumed by the `path_rasterization` pipeline.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    /// Vertex position in scaled pixels.
    xy_position: Point<ScaledPixels>,
    /// Parametric coordinates used for curve coverage evaluation.
    st_position: Point<f32>,
    /// Fill for this path; `Background` may be a solid color or gradient.
    color: Background,
    /// Bounding box of the whole path this vertex belongs to.
    bounds: Bounds<ScaledPixels>,
}
70
/// Caller-supplied configuration for creating a renderer surface.
pub struct WgpuSurfaceConfig {
    /// Initial drawable size in device pixels (clamped to the adapter's
    /// maximum texture dimension at creation time).
    pub size: Size<DevicePixels>,
    /// Whether the window wants a transparent (premultiplied-alpha) surface.
    pub transparent: bool,
}
75
/// One render pipeline per primitive kind drawn by the renderer.
/// Recreated wholesale when the surface alpha mode changes
/// (see `update_transparency`).
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    /// Renders path geometry into the intermediate texture (possibly MSAA).
    path_rasterization: wgpu::RenderPipeline,
    /// Composites the rasterized path texture onto the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    /// Present only when the device supports dual-source blending.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
88
/// Bind group layouts shared by all pipelines; created once in
/// `create_bind_group_layouts` and reused across pipeline rebuilds.
struct WgpuBindGroupLayouts {
    /// Group 0: globals uniform (binding 0) + gamma uniform (binding 1).
    globals: wgpu::BindGroupLayout,
    /// Group 1 variant: a single read-only storage buffer of instances.
    instances: wgpu::BindGroupLayout,
    /// Group 1 variant: instances storage buffer + texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    /// Group 1 variant for the `surfaces` pipeline: uniform + two textures + sampler.
    surfaces: wgpu::BindGroupLayout,
}
95
/// Shared GPU context reference, used to coordinate device recovery across multiple windows.
///
/// `None` until the first window initializes the context; subsequent windows
/// reuse the same `WgpuContext` (see `WgpuRenderer::new`).
pub type GpuContext = Rc<RefCell<Option<WgpuContext>>>;
98
/// GPU resources that must be dropped together during device recovery.
struct WgpuResources {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    /// Linear-filtering sampler shared by all texture-sampling pipelines.
    atlas_sampler: wgpu::Sampler,
    /// Single uniform buffer holding GlobalParams (offset 0), path globals
    /// (at `path_globals_offset`) and GammaParams (at `gamma_offset`).
    globals_buffer: wgpu::Buffer,
    globals_bind_group: wgpu::BindGroup,
    /// Same layout as `globals_bind_group` but reads the path-specific globals slice.
    path_globals_bind_group: wgpu::BindGroup,
    /// Growable storage buffer for per-primitive instance data.
    instance_buffer: wgpu::Buffer,
    // Intermediate textures are created lazily (see `ensure_intermediate_textures`)
    // and invalidated on resize, hence the Options.
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
}
116
/// A gpui renderer backed by wgpu: owns the window surface, pipelines, and
/// the sprite atlas, and draws `Scene`s submitted via `draw`.
pub struct WgpuRenderer {
    /// Shared GPU context for device recovery coordination (unused on WASM).
    #[allow(dead_code)]
    context: Option<GpuContext>,
    /// Compositor GPU hint for adapter selection (unused on WASM).
    #[allow(dead_code)]
    compositor_gpu: Option<CompositorGpuHint>,
    /// `None` only while resources are torn down (e.g. device recovery);
    /// accessors `resources`/`resources_mut` panic in that window.
    resources: Option<WgpuResources>,
    surface_config: wgpu::SurfaceConfiguration,
    atlas: Arc<WgpuAtlas>,
    /// Byte offset of the path-globals slice within `globals_buffer`.
    path_globals_offset: u64,
    /// Byte offset of the `GammaParams` slice within `globals_buffer`.
    gamma_offset: u64,
    /// Current size of `instance_buffer`, in bytes.
    instance_buffer_capacity: u64,
    /// Device limit: maximum buffer size.
    max_buffer_size: u64,
    /// Device limit: minimum storage buffer offset alignment.
    storage_buffer_alignment: u64,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    /// Alpha mode chosen for transparent windows (preferred: PreMultiplied).
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    /// Alpha mode chosen for opaque windows (preferred: Opaque).
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    /// Last uncaptured GPU error, written from the device error callback.
    last_error: Arc<Mutex<Option<String>>>,
    /// Consecutive frames that reported a GPU error; reset on a clean frame.
    failed_frame_count: u32,
    /// Set when the device is lost; shared with the context's lost callback.
    device_lost: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
142
143impl WgpuRenderer {
144 fn resources(&self) -> &WgpuResources {
145 self.resources
146 .as_ref()
147 .expect("GPU resources not available")
148 }
149
150 fn resources_mut(&mut self) -> &mut WgpuResources {
151 self.resources
152 .as_mut()
153 .expect("GPU resources not available")
154 }
155
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// The `gpu_context` is a shared reference that coordinates GPU context across
    /// multiple windows. The first window to create a renderer will initialize the
    /// context; subsequent windows will share it.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    #[cfg(not(target_family = "wasm"))]
    pub fn new<W>(
        gpu_context: GpuContext,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        // Resolve the raw window handle first so a failure becomes an error
        // instead of a panic deeper inside wgpu.
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            // Fall back to the display handle already provided via InstanceDescriptor::display.
            raw_display_handle: None,
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        let instance = gpu_context
            .borrow()
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(|| WgpuContext::instance(Box::new(window.clone())));

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        // First window initializes the shared context; later windows verify
        // their surface is compatible with the already-selected adapter/device.
        let mut ctx_ref = gpu_context.borrow_mut();
        let context = match ctx_ref.as_mut() {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => ctx_ref.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        let atlas = Arc::new(WgpuAtlas::new(
            Arc::clone(&context.device),
            Arc::clone(&context.queue),
        ));

        Self::new_internal(
            Some(Rc::clone(&gpu_context)),
            context,
            surface,
            config,
            compositor_gpu,
            atlas,
        )
    }
226
    /// Creates a renderer targeting an HTML canvas (WASM builds only).
    ///
    /// Unlike the native path, no shared `GpuContext` is stored and no
    /// compositor GPU hint applies.
    #[cfg(target_family = "wasm")]
    pub fn new_from_canvas(
        context: &WgpuContext,
        canvas: &web_sys::HtmlCanvasElement,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface = context
            .instance
            .create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))
            .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?;

        let atlas = Arc::new(WgpuAtlas::new(
            Arc::clone(&context.device),
            Arc::clone(&context.queue),
        ));

        Self::new_internal(None, context, surface, config, None, atlas)
    }
245
    /// Shared constructor tail: picks surface format and alpha modes, configures
    /// the surface, builds pipelines/layouts/buffers/bind groups, and assembles
    /// the renderer. Called by both the native and WASM entry points.
    fn new_internal(
        gpu_context: Option<GpuContext>,
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
        atlas: Arc<WgpuAtlas>,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer non-sRGB 8-bit formats; fall back to any non-sRGB format,
        // then to whatever the surface offers first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Pick the first supported alpha mode from an ordered preference list,
        // falling back to whatever the surface supports.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Both modes are resolved up front so update_transparency can switch
        // between them later without re-querying capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to the device's texture limit; a warning is
        // logged because content past the clamp will not be rendered.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                 Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            // .max(1): zero-sized surfaces are invalid in wgpu.
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Lay out three uniform slices (globals, path globals, gamma) in one
        // buffer, each aligned to min_uniform_buffer_offset_alignment.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let max_buffer_size = device.limits().max_buffer_size;
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        // 2 MiB starting capacity; the buffer grows on demand up to max_buffer_size.
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Two bind groups over the same buffer: this one reads the main globals
        // slice at offset 0 ...
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        // ... and this one reads the path-specific globals slice at
        // path_globals_offset. Both share the gamma slice on binding 1.
        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Capture uncaptured device errors into last_error; draw() inspects it
        // each frame to count consecutive failures.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        let resources = WgpuResources {
            device,
            queue,
            surface,
            pipelines,
            bind_group_layouts,
            atlas_sampler,
            globals_buffer,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
        };

        Ok(Self {
            context: gpu_context,
            compositor_gpu,
            resources: Some(resources),
            surface_config,
            atlas,
            path_globals_offset,
            gamma_offset,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            max_buffer_size,
            storage_buffer_alignment,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
            device_lost: context.device_lost_flag(),
        })
    }
473
    /// Creates the four bind group layouts shared by every pipeline.
    /// These are created once; pipelines may be rebuilt against them later
    /// (see `update_transparency`).
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        // Group 0 everywhere: GlobalParams uniform (binding 0, vertex+fragment)
        // and GammaParams uniform (binding 1, fragment only).
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer entry, reused by both instance layouts.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        // Group 1 for pipelines that only need instance data.
        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // Group 1 for sprite/path pipelines: instances + sampled texture + sampler.
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // Group 1 for the surfaces pipeline: SurfaceParams uniform + two
        // sampled textures (bindings 1 and 2) + sampler (binding 3).
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
597
    /// Builds every render pipeline for the given surface format / alpha mode.
    ///
    /// Called at construction and again from `update_transparency` when the
    /// alpha mode changes (the blend state depends on it). The subpixel
    /// pipeline is only built when dual-source blending is supported.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let base_shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(base_shader_source)),
        });

        // Subpixel text needs WGSL's dual_source_blending extension; compile a
        // separate module (base + subpixel sources) only when supported.
        let subpixel_shader_source = include_str!("shaders_subpixel.wgsl");
        let subpixel_shader_module = if dual_source_blending {
            let combined = format!(
                "enable dual_source_blending;\n{base_shader_source}\n{subpixel_shader_source}"
            );
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("gpui_subpixel_shaders"),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Owned(combined)),
            }))
        } else {
            None
        };

        // Blend state must match how the compositor interprets alpha.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Helper closure: all pipelines share the same shape (two bind groups,
        // no vertex buffers, no depth/stencil) and differ only in entry points,
        // topology, color targets, sample count, and shader module.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32,
                               module: &wgpu::ShaderModule| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[Some(globals_layout), Some(data_layout)],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module,
                    entry_point: Some(vs_entry),
                    // Instance data comes from storage buffers, not vertex buffers.
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Renders path coverage into the intermediate texture; always uses
        // premultiplied blending regardless of the surface alpha mode, and may
        // be multisampled (path_sample_count).
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
            &shader_module,
        );

        // Custom blend for compositing the (premultiplied) path texture:
        // color uses One/OneMinusSrcAlpha; alpha accumulates additively.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
            &shader_module,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Subpixel text uses dual-source blending (Src1 factors) and writes
        // color channels only, leaving destination alpha untouched.
        let subpixel_sprites = if let Some(subpixel_module) = &subpixel_shader_module {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
                subpixel_module,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
            &shader_module,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
850
851 fn create_path_intermediate(
852 device: &wgpu::Device,
853 format: wgpu::TextureFormat,
854 width: u32,
855 height: u32,
856 ) -> (wgpu::Texture, wgpu::TextureView) {
857 let texture = device.create_texture(&wgpu::TextureDescriptor {
858 label: Some("path_intermediate"),
859 size: wgpu::Extent3d {
860 width: width.max(1),
861 height: height.max(1),
862 depth_or_array_layers: 1,
863 },
864 mip_level_count: 1,
865 sample_count: 1,
866 dimension: wgpu::TextureDimension::D2,
867 format,
868 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
869 view_formats: &[],
870 });
871 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
872 (texture, view)
873 }
874
875 fn create_msaa_if_needed(
876 device: &wgpu::Device,
877 format: wgpu::TextureFormat,
878 width: u32,
879 height: u32,
880 sample_count: u32,
881 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
882 if sample_count <= 1 {
883 return None;
884 }
885 let texture = device.create_texture(&wgpu::TextureDescriptor {
886 label: Some("path_msaa"),
887 size: wgpu::Extent3d {
888 width: width.max(1),
889 height: height.max(1),
890 depth_or_array_layers: 1,
891 },
892 mip_level_count: 1,
893 sample_count,
894 dimension: wgpu::TextureDimension::D2,
895 format,
896 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
897 view_formats: &[],
898 });
899 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
900 Some((texture, view))
901 }
902
    /// Reconfigures the surface for a new drawable size.
    ///
    /// No-op when the size is unchanged. The requested size is clamped to the
    /// device's max texture dimension (with a warning). Intermediate textures
    /// are destroyed here and recreated lazily on the next draw.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                     Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            // .max(1): wgpu rejects zero-sized surfaces.
            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            let surface_config = self.surface_config.clone();

            let resources = self.resources_mut();

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = resources.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = resources.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = resources.path_msaa_texture {
                texture.destroy();
            }

            resources
                .surface
                .configure(&resources.device, &surface_config);

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            resources.path_intermediate_texture = None;
            resources.path_intermediate_view = None;
            resources.path_msaa_texture = None;
            resources.path_msaa_view = None;
        }
    }
954
    /// Lazily (re)creates the path intermediate texture — and the MSAA target
    /// when `path_sample_count > 1` — sized to the current surface config.
    /// Called from `draw` once the surface is known to be healthy; no-op if
    /// the textures already exist.
    fn ensure_intermediate_textures(&mut self) {
        if self.resources().path_intermediate_texture.is_some() {
            return;
        }

        // Copy config values out first to avoid borrowing self twice.
        let format = self.surface_config.format;
        let width = self.surface_config.width;
        let height = self.surface_config.height;
        let path_sample_count = self.rendering_params.path_sample_count;
        let resources = self.resources_mut();

        let (t, v) = Self::create_path_intermediate(&resources.device, format, width, height);
        resources.path_intermediate_texture = Some(t);
        resources.path_intermediate_view = Some(v);

        // None/None when multisampling is disabled (sample count <= 1).
        let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
            &resources.device,
            format,
            width,
            height,
            path_sample_count,
        )
        .map(|(t, v)| (Some(t), Some(v)))
        .unwrap_or((None, None));
        resources.path_msaa_texture = path_msaa_texture;
        resources.path_msaa_view = path_msaa_view;
    }
982
    /// Switches the surface between the transparent and opaque alpha modes
    /// chosen at construction. No-op if the mode is unchanged; otherwise the
    /// surface is reconfigured and all pipelines are rebuilt, since their
    /// blend state depends on the alpha mode.
    pub fn update_transparency(&mut self, transparent: bool) {
        let new_alpha_mode = if transparent {
            self.transparent_alpha_mode
        } else {
            self.opaque_alpha_mode
        };

        if new_alpha_mode != self.surface_config.alpha_mode {
            self.surface_config.alpha_mode = new_alpha_mode;
            // Copy everything needed out of self before the resources_mut borrow.
            let surface_config = self.surface_config.clone();
            let path_sample_count = self.rendering_params.path_sample_count;
            let dual_source_blending = self.dual_source_blending;
            let resources = self.resources_mut();
            resources
                .surface
                .configure(&resources.device, &surface_config);
            resources.pipelines = Self::create_pipelines(
                &resources.device,
                &resources.bind_group_layouts,
                surface_config.format,
                surface_config.alpha_mode,
                path_sample_count,
                dual_source_blending,
            );
        }
    }
1009
1010 #[allow(dead_code)]
1011 pub fn viewport_size(&self) -> Size<DevicePixels> {
1012 Size {
1013 width: DevicePixels(self.surface_config.width as i32),
1014 height: DevicePixels(self.surface_config.height as i32),
1015 }
1016 }
1017
    /// The texture atlas this renderer uploads sprites/glyphs into.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
1021
    /// Whether the device supports dual-source blending (and therefore
    /// whether the subpixel text pipeline exists).
    pub fn supports_dual_source_blending(&self) -> bool {
        self.dual_source_blending
    }
1025
1026 pub fn gpu_specs(&self) -> GpuSpecs {
1027 GpuSpecs {
1028 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
1029 device_name: self.adapter_info.name.clone(),
1030 driver_name: self.adapter_info.driver.clone(),
1031 driver_info: self.adapter_info.driver_info.clone(),
1032 }
1033 }
1034
    /// Device limit for 2D texture dimensions (`max_texture_dimension_2d`),
    /// also used to clamp surface sizes.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
1038
    /// Encodes and submits one full frame for `scene`, presenting it to the
    /// surface.
    ///
    /// Recovers from suboptimal/lost/outdated surfaces by reconfiguring and
    /// skipping the frame, and retries the whole frame with a larger instance
    /// buffer when a primitive batch overflows it.
    ///
    /// # Panics
    ///
    /// Panics after more than 20 consecutive frames with GPU errors.
    pub fn draw(&mut self, scene: &Scene) {
        // GPU errors are reported asynchronously into `last_error`; count
        // consecutive failures and give up once they exceed the threshold.
        let last_error = self.last_error.lock().unwrap().take();
        if let Some(error) = last_error {
            self.failed_frame_count += 1;
            log::error!(
                "GPU error during frame (failure {} of 20): {error}",
                self.failed_frame_count
            );
            if self.failed_frame_count > 20 {
                panic!("Too many consecutive GPU errors. Last error: {error}");
            }
        } else {
            // A clean frame resets the failure streak.
            self.failed_frame_count = 0;
        }

        self.atlas.before_frame();

        // Acquire the next swapchain texture; reconfigure the surface (and
        // skip this frame) when it is suboptimal, lost, or outdated.
        let frame = match self.resources().surface.get_current_texture() {
            wgpu::CurrentSurfaceTexture::Success(frame) => frame,
            wgpu::CurrentSurfaceTexture::Suboptimal(frame) => {
                // Textures must be destroyed before the surface can be reconfigured.
                drop(frame);
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                return;
            }
            wgpu::CurrentSurfaceTexture::Lost | wgpu::CurrentSurfaceTexture::Outdated => {
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                return;
            }
            wgpu::CurrentSurfaceTexture::Timeout | wgpu::CurrentSurfaceTexture::Occluded => {
                // Nothing to render into right now; silently drop the frame.
                return;
            }
            wgpu::CurrentSurfaceTexture::Validation => {
                // Record the error so the failure counter above trips next frame.
                *self.last_error.lock().unwrap() =
                    Some("Surface texture validation error".to_string());
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        // Per-frame text rendering parameters (gamma + contrast).
        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        // Viewport/alpha globals for the main pass.
        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // Path rasterization targets an intermediate texture, which is always
        // treated as straight (non-premultiplied) alpha regardless of the
        // surface's alpha mode.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload all three uniform blocks into the shared globals buffer at
        // their pre-computed offsets.
        {
            let resources = self.resources();
            resources.queue.write_buffer(
                &resources.globals_buffer,
                0,
                bytemuck::bytes_of(&globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.path_globals_offset,
                bytemuck::bytes_of(&path_globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.gamma_offset,
                bytemuck::bytes_of(&gamma_params),
            );
        }

        // Encode the frame. If any batch overflows the instance buffer, grow
        // the buffer and re-encode the whole frame from scratch.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder =
                self.resources()
                    .device
                    .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                        label: Some("main_encoder"),
                    });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw helper returns false when the instance buffer is
                // full, which aborts the pass and triggers the retry below.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Path rasterization needs its own render pass on
                            // this encoder, so suspend the main pass first.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            // Resume the main pass, loading (not clearing) the
                            // content rendered so far.
                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                // Composite the rasterized paths onto the frame.
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // The encoder still borrows frame resources; drop it before
                // touching the instance buffer.
                drop(encoder);
                if self.instance_buffer_capacity >= self.max_buffer_size {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    // Present the partial frame rather than stalling the
                    // swapchain indefinitely.
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.resources()
                .queue
                .submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1275
1276 fn draw_quads(
1277 &self,
1278 quads: &[Quad],
1279 instance_offset: &mut u64,
1280 pass: &mut wgpu::RenderPass<'_>,
1281 ) -> bool {
1282 let data = unsafe { Self::instance_bytes(quads) };
1283 self.draw_instances(
1284 data,
1285 quads.len() as u32,
1286 &self.resources().pipelines.quads,
1287 instance_offset,
1288 pass,
1289 )
1290 }
1291
1292 fn draw_shadows(
1293 &self,
1294 shadows: &[Shadow],
1295 instance_offset: &mut u64,
1296 pass: &mut wgpu::RenderPass<'_>,
1297 ) -> bool {
1298 let data = unsafe { Self::instance_bytes(shadows) };
1299 self.draw_instances(
1300 data,
1301 shadows.len() as u32,
1302 &self.resources().pipelines.shadows,
1303 instance_offset,
1304 pass,
1305 )
1306 }
1307
1308 fn draw_underlines(
1309 &self,
1310 underlines: &[Underline],
1311 instance_offset: &mut u64,
1312 pass: &mut wgpu::RenderPass<'_>,
1313 ) -> bool {
1314 let data = unsafe { Self::instance_bytes(underlines) };
1315 self.draw_instances(
1316 data,
1317 underlines.len() as u32,
1318 &self.resources().pipelines.underlines,
1319 instance_offset,
1320 pass,
1321 )
1322 }
1323
1324 fn draw_monochrome_sprites(
1325 &self,
1326 sprites: &[MonochromeSprite],
1327 texture_id: AtlasTextureId,
1328 instance_offset: &mut u64,
1329 pass: &mut wgpu::RenderPass<'_>,
1330 ) -> bool {
1331 let tex_info = self.atlas.get_texture_info(texture_id);
1332 let data = unsafe { Self::instance_bytes(sprites) };
1333 self.draw_instances_with_texture(
1334 data,
1335 sprites.len() as u32,
1336 &tex_info.view,
1337 &self.resources().pipelines.mono_sprites,
1338 instance_offset,
1339 pass,
1340 )
1341 }
1342
1343 fn draw_subpixel_sprites(
1344 &self,
1345 sprites: &[SubpixelSprite],
1346 texture_id: AtlasTextureId,
1347 instance_offset: &mut u64,
1348 pass: &mut wgpu::RenderPass<'_>,
1349 ) -> bool {
1350 let tex_info = self.atlas.get_texture_info(texture_id);
1351 let data = unsafe { Self::instance_bytes(sprites) };
1352 let resources = self.resources();
1353 let pipeline = resources
1354 .pipelines
1355 .subpixel_sprites
1356 .as_ref()
1357 .unwrap_or(&resources.pipelines.mono_sprites);
1358 self.draw_instances_with_texture(
1359 data,
1360 sprites.len() as u32,
1361 &tex_info.view,
1362 pipeline,
1363 instance_offset,
1364 pass,
1365 )
1366 }
1367
1368 fn draw_polychrome_sprites(
1369 &self,
1370 sprites: &[PolychromeSprite],
1371 texture_id: AtlasTextureId,
1372 instance_offset: &mut u64,
1373 pass: &mut wgpu::RenderPass<'_>,
1374 ) -> bool {
1375 let tex_info = self.atlas.get_texture_info(texture_id);
1376 let data = unsafe { Self::instance_bytes(sprites) };
1377 self.draw_instances_with_texture(
1378 data,
1379 sprites.len() as u32,
1380 &tex_info.view,
1381 &self.resources().pipelines.poly_sprites,
1382 instance_offset,
1383 pass,
1384 )
1385 }
1386
1387 fn draw_instances(
1388 &self,
1389 data: &[u8],
1390 instance_count: u32,
1391 pipeline: &wgpu::RenderPipeline,
1392 instance_offset: &mut u64,
1393 pass: &mut wgpu::RenderPass<'_>,
1394 ) -> bool {
1395 if instance_count == 0 {
1396 return true;
1397 }
1398 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1399 return false;
1400 };
1401 let resources = self.resources();
1402 let bind_group = resources
1403 .device
1404 .create_bind_group(&wgpu::BindGroupDescriptor {
1405 label: None,
1406 layout: &resources.bind_group_layouts.instances,
1407 entries: &[wgpu::BindGroupEntry {
1408 binding: 0,
1409 resource: self.instance_binding(offset, size),
1410 }],
1411 });
1412 pass.set_pipeline(pipeline);
1413 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1414 pass.set_bind_group(1, &bind_group, &[]);
1415 pass.draw(0..4, 0..instance_count);
1416 true
1417 }
1418
1419 fn draw_instances_with_texture(
1420 &self,
1421 data: &[u8],
1422 instance_count: u32,
1423 texture_view: &wgpu::TextureView,
1424 pipeline: &wgpu::RenderPipeline,
1425 instance_offset: &mut u64,
1426 pass: &mut wgpu::RenderPass<'_>,
1427 ) -> bool {
1428 if instance_count == 0 {
1429 return true;
1430 }
1431 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1432 return false;
1433 };
1434 let resources = self.resources();
1435 let bind_group = resources
1436 .device
1437 .create_bind_group(&wgpu::BindGroupDescriptor {
1438 label: None,
1439 layout: &resources.bind_group_layouts.instances_with_texture,
1440 entries: &[
1441 wgpu::BindGroupEntry {
1442 binding: 0,
1443 resource: self.instance_binding(offset, size),
1444 },
1445 wgpu::BindGroupEntry {
1446 binding: 1,
1447 resource: wgpu::BindingResource::TextureView(texture_view),
1448 },
1449 wgpu::BindGroupEntry {
1450 binding: 2,
1451 resource: wgpu::BindingResource::Sampler(&resources.atlas_sampler),
1452 },
1453 ],
1454 });
1455 pass.set_pipeline(pipeline);
1456 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1457 pass.set_bind_group(1, &bind_group, &[]);
1458 pass.draw(0..4, 0..instance_count);
1459 true
1460 }
1461
    /// Reinterprets a slice of instance structs as raw bytes for upload to
    /// the GPU instance buffer.
    ///
    /// # Safety
    ///
    /// The returned slice aliases `instances`' memory byte-for-byte. Callers
    /// must only use this with `#[repr(C)]` types whose every byte is safe to
    /// read; note that interior padding bytes may be uninitialized, so `T`
    /// should have a padding-free layout (TODO(review): consider requiring
    /// `bytemuck::Pod` to enforce this at compile time).
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        unsafe {
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1470
1471 fn draw_paths_from_intermediate(
1472 &self,
1473 paths: &[Path<ScaledPixels>],
1474 instance_offset: &mut u64,
1475 pass: &mut wgpu::RenderPass<'_>,
1476 ) -> bool {
1477 let first_path = &paths[0];
1478 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1479 {
1480 paths
1481 .iter()
1482 .map(|p| PathSprite {
1483 bounds: p.clipped_bounds(),
1484 })
1485 .collect()
1486 } else {
1487 let mut bounds = first_path.clipped_bounds();
1488 for path in paths.iter().skip(1) {
1489 bounds = bounds.union(&path.clipped_bounds());
1490 }
1491 vec![PathSprite { bounds }]
1492 };
1493
1494 let resources = self.resources();
1495 let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
1496 return true;
1497 };
1498
1499 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1500 self.draw_instances_with_texture(
1501 sprite_data,
1502 sprites.len() as u32,
1503 path_intermediate_view,
1504 &resources.pipelines.paths,
1505 instance_offset,
1506 pass,
1507 )
1508 }
1509
1510 fn draw_paths_to_intermediate(
1511 &self,
1512 encoder: &mut wgpu::CommandEncoder,
1513 paths: &[Path<ScaledPixels>],
1514 instance_offset: &mut u64,
1515 ) -> bool {
1516 let mut vertices = Vec::new();
1517 for path in paths {
1518 let bounds = path.clipped_bounds();
1519 vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
1520 xy_position: v.xy_position,
1521 st_position: v.st_position,
1522 color: path.color,
1523 bounds,
1524 }));
1525 }
1526
1527 if vertices.is_empty() {
1528 return true;
1529 }
1530
1531 let vertex_data = unsafe { Self::instance_bytes(&vertices) };
1532 let Some((vertex_offset, vertex_size)) =
1533 self.write_to_instance_buffer(instance_offset, vertex_data)
1534 else {
1535 return false;
1536 };
1537
1538 let resources = self.resources();
1539 let data_bind_group = resources
1540 .device
1541 .create_bind_group(&wgpu::BindGroupDescriptor {
1542 label: Some("path_rasterization_bind_group"),
1543 layout: &resources.bind_group_layouts.instances,
1544 entries: &[wgpu::BindGroupEntry {
1545 binding: 0,
1546 resource: self.instance_binding(vertex_offset, vertex_size),
1547 }],
1548 });
1549
1550 let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
1551 return true;
1552 };
1553
1554 let (target_view, resolve_target) = if let Some(ref msaa_view) = resources.path_msaa_view {
1555 (msaa_view, Some(path_intermediate_view))
1556 } else {
1557 (path_intermediate_view, None)
1558 };
1559
1560 {
1561 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1562 label: Some("path_rasterization_pass"),
1563 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1564 view: target_view,
1565 resolve_target,
1566 ops: wgpu::Operations {
1567 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1568 store: wgpu::StoreOp::Store,
1569 },
1570 depth_slice: None,
1571 })],
1572 depth_stencil_attachment: None,
1573 ..Default::default()
1574 });
1575
1576 pass.set_pipeline(&resources.pipelines.path_rasterization);
1577 pass.set_bind_group(0, &resources.path_globals_bind_group, &[]);
1578 pass.set_bind_group(1, &data_bind_group, &[]);
1579 pass.draw(0..vertices.len() as u32, 0..1);
1580 }
1581
1582 true
1583 }
1584
1585 fn grow_instance_buffer(&mut self) {
1586 let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
1587 log::info!("increased instance buffer size to {}", new_capacity);
1588 let resources = self.resources_mut();
1589 resources.instance_buffer = resources.device.create_buffer(&wgpu::BufferDescriptor {
1590 label: Some("instance_buffer"),
1591 size: new_capacity,
1592 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1593 mapped_at_creation: false,
1594 });
1595 self.instance_buffer_capacity = new_capacity;
1596 }
1597
1598 fn write_to_instance_buffer(
1599 &self,
1600 instance_offset: &mut u64,
1601 data: &[u8],
1602 ) -> Option<(u64, NonZeroU64)> {
1603 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1604 let size = (data.len() as u64).max(16);
1605 if offset + size > self.instance_buffer_capacity {
1606 return None;
1607 }
1608 let resources = self.resources();
1609 resources
1610 .queue
1611 .write_buffer(&resources.instance_buffer, offset, data);
1612 *instance_offset = offset + size;
1613 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1614 }
1615
    /// Builds a buffer binding for a `(offset, size)` slot previously
    /// reserved in the instance buffer via `write_to_instance_buffer`.
    fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
        wgpu::BindingResource::Buffer(wgpu::BufferBinding {
            buffer: &self.resources().instance_buffer,
            offset,
            size: Some(size),
        })
    }
1623
    /// Eagerly drops all surface-bound GPU resources.
    pub fn destroy(&mut self) {
        // Release surface-bound GPU resources eagerly so the underlying native
        // window can be destroyed before the renderer itself is dropped.
        self.resources.take();
    }
1629
    /// Returns true if the GPU device was lost and recovery is needed.
    pub fn device_lost(&self) -> bool {
        // SeqCst: the flag is presumably set from a device-lost callback on
        // another thread — conservative ordering keeps this race-free.
        self.device_lost.load(std::sync::atomic::Ordering::SeqCst)
    }
1634
    /// Recovers from a lost GPU device by recreating the renderer with a new context.
    ///
    /// Call this after detecting `device_lost()` returns true.
    ///
    /// This method coordinates recovery across multiple windows:
    /// - The first window to call this will recreate the shared context
    /// - Subsequent windows will adopt the already-recovered context
    ///
    /// # Errors
    ///
    /// Returns an error when the window handle cannot be obtained, the
    /// surface cannot be created, or the new context / renderer cannot be
    /// constructed.
    #[cfg(not(target_family = "wasm"))]
    pub fn recover<W>(&mut self, window: &W) -> anyhow::Result<()>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let gpu_context = self.context.as_ref().expect("recover requires gpu_context");

        // Check if another window already recovered the context
        let needs_new_context = gpu_context
            .borrow()
            .as_ref()
            .is_none_or(|ctx| ctx.device_lost());

        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let surface = if needs_new_context {
            log::warn!("GPU device lost, recreating context...");

            // Drop old resources to release Arc<Device>/Arc<Queue> and GPU resources
            self.resources = None;
            *gpu_context.borrow_mut() = None;

            // Wait for GPU driver to stabilize (350ms copied from windows :shrug:)
            std::thread::sleep(std::time::Duration::from_millis(350));

            let instance = WgpuContext::instance(Box::new(window.clone()));
            let surface = create_surface(&instance, window_handle.as_raw())?;
            let new_context = WgpuContext::new(instance, &surface, self.compositor_gpu)?;
            *gpu_context.borrow_mut() = Some(new_context);
            surface
        } else {
            // Another window already rebuilt the context; just make a surface
            // for this window from the existing instance.
            let ctx_ref = gpu_context.borrow();
            let instance = &ctx_ref.as_ref().unwrap().instance;
            create_surface(instance, window_handle.as_raw())?
        };

        // Preserve the previous window size and transparency for the new
        // renderer's surface configuration.
        let config = WgpuSurfaceConfig {
            size: gpui::Size {
                width: gpui::DevicePixels(self.surface_config.width as i32),
                height: gpui::DevicePixels(self.surface_config.height as i32),
            },
            transparent: self.surface_config.alpha_mode != wgpu::CompositeAlphaMode::Opaque,
        };
        // Clone the Rc so the borrow below is independent of `self.context`,
        // which is replaced when `*self` is reassigned.
        let gpu_context = Rc::clone(gpu_context);
        let ctx_ref = gpu_context.borrow();
        let context = ctx_ref.as_ref().expect("context should exist");

        // Drop the old renderer resources and migrate the atlas onto the new
        // device/queue before rebuilding the renderer in place.
        self.resources = None;
        self.atlas
            .handle_device_lost(Arc::clone(&context.device), Arc::clone(&context.queue));

        *self = Self::new_internal(
            Some(gpu_context.clone()),
            context,
            surface,
            config,
            self.compositor_gpu,
            self.atlas.clone(),
        )?;

        log::info!("GPU recovery complete");
        Ok(())
    }
1707}
1708
/// Creates a wgpu surface for `instance` from a raw window handle.
///
/// # Errors
///
/// Returns an error when wgpu fails to create the surface.
#[cfg(not(target_family = "wasm"))]
fn create_surface(
    instance: &wgpu::Instance,
    raw_window_handle: raw_window_handle::RawWindowHandle,
) -> anyhow::Result<wgpu::Surface<'static>> {
    // SAFETY: callers must pass a handle to a live window that outlives the
    // returned surface — the contract of `create_surface_unsafe`.
    unsafe {
        instance
            .create_surface_unsafe(wgpu::SurfaceTargetUnsafe::RawHandle {
                // Fall back to the display handle already provided via InstanceDescriptor::display.
                raw_display_handle: None,
                raw_window_handle,
            })
            .map_err(|e| anyhow::anyhow!("{e}"))
    }
}
1724
/// Rendering tunables resolved once when the renderer is created.
struct RenderingParameters {
    // MSAA sample count for path rasterization: 4, 2, or 1 depending on
    // what the surface format supports.
    path_sample_count: u32,
    // Per-channel gamma correction ratios for text rendering.
    gamma_ratios: [f32; 4],
    // Extra contrast for grayscale-antialiased text (non-negative).
    grayscale_enhanced_contrast: f32,
    // Extra contrast for subpixel-antialiased text (non-negative).
    subpixel_enhanced_contrast: f32,
}
1731
1732impl RenderingParameters {
1733 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1734 use std::env;
1735
1736 let format_features = adapter.get_texture_format_features(surface_format);
1737 let path_sample_count = [4, 2, 1]
1738 .into_iter()
1739 .find(|&n| format_features.flags.sample_count_supported(n))
1740 .unwrap_or(1);
1741
1742 let gamma = env::var("ZED_FONTS_GAMMA")
1743 .ok()
1744 .and_then(|v| v.parse().ok())
1745 .unwrap_or(1.8_f32)
1746 .clamp(1.0, 2.2);
1747 let gamma_ratios = get_gamma_correction_ratios(gamma);
1748
1749 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1750 .ok()
1751 .and_then(|v| v.parse().ok())
1752 .unwrap_or(1.0_f32)
1753 .max(0.0);
1754
1755 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1756 .ok()
1757 .and_then(|v| v.parse().ok())
1758 .unwrap_or(0.5_f32)
1759 .max(0.0);
1760
1761 Self {
1762 path_sample_count,
1763 gamma_ratios,
1764 grayscale_enhanced_contrast,
1765 subpixel_enhanced_contrast,
1766 }
1767 }
1768}