1use crate::{CompositorGpuHint, WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use log::warn;
9use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
10use std::cell::RefCell;
11use std::num::NonZeroU64;
12use std::rc::Rc;
13use std::sync::{Arc, Mutex};
14
/// Per-frame uniform parameters shared by most draw calls.
///
/// Uploaded at offset 0 of `globals_buffer` and bound at `@binding(0)` of the
/// globals bind group. NOTE(review): layout presumably mirrors a matching
/// struct in `shaders.wgsl` — field order and padding must not change without
/// updating the shader.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    // Viewport size in device pixels: [width, height].
    viewport_size: [f32; 2],
    // 1 when the surface composite alpha mode is PreMultiplied, else 0.
    premultiplied_alpha: u32,
    // Explicit padding to keep the struct 16 bytes.
    pad: u32,
}
22
/// A `bytemuck`-friendly rectangle: origin and size as plain `f32` pairs,
/// suitable for direct upload into GPU buffers.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    // [x, y] of the top-left corner.
    origin: [f32; 2],
    // [width, height].
    size: [f32; 2],
}
29
30impl From<Bounds<ScaledPixels>> for PodBounds {
31 fn from(bounds: Bounds<ScaledPixels>) -> Self {
32 Self {
33 origin: [bounds.origin.x.0, bounds.origin.y.0],
34 size: [bounds.size.width.0, bounds.size.height.0],
35 }
36 }
37}
38
/// Per-surface uniform parameters for the (currently unused) surface pipeline.
///
/// NOTE(review): layout presumably mirrors the corresponding WGSL struct;
/// keep field order in sync with `shaders.wgsl`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    // Destination rectangle of the surface on screen.
    bounds: PodBounds,
    // Clip rectangle applied in the fragment shader.
    content_mask: PodBounds,
}
45
/// Text gamma-correction parameters, uploaded at `gamma_offset` within
/// `globals_buffer` and bound at `@binding(1)` of the globals bind group.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    // Ratios produced by `get_gamma_correction_ratios` (see RenderingParameters).
    gamma_ratios: [f32; 4],
    // Contrast enhancement factor for grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    // Contrast enhancement factor for subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    // Padding to a 32-byte struct size.
    _pad: [f32; 2],
}
54
/// Instance data for compositing one rasterized path from the intermediate
/// texture onto the frame.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    // Screen-space rectangle the path covers.
    bounds: Bounds<ScaledPixels>,
}
60
/// Vertex data consumed by the path-rasterization pipeline when drawing path
/// geometry into the intermediate texture.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    // Vertex position in scaled screen pixels.
    xy_position: Point<ScaledPixels>,
    // Curve-space coordinate used for analytic coverage in the fragment shader.
    st_position: Point<f32>,
    // Fill color (solid or gradient) of the path.
    color: Background,
    // Bounds of the whole path, for clipping/gradient evaluation.
    bounds: Bounds<ScaledPixels>,
}
69
/// Initial configuration for the window surface backing a [`WgpuRenderer`].
pub struct WgpuSurfaceConfig {
    /// Requested drawable size in device pixels (clamped to the adapter's
    /// maximum texture dimension at creation time).
    pub size: Size<DevicePixels>,
    /// Whether the window wants a transparent (premultiplied-alpha) surface.
    pub transparent: bool,
}
74
/// One render pipeline per primitive kind drawn by the renderer.
///
/// Recreated whenever the surface alpha mode changes (see
/// `update_transparency`), since blend state depends on it.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    // Draws path geometry into the intermediate (possibly MSAA) texture.
    path_rasterization: wgpu::RenderPipeline,
    // Composites rasterized paths from the intermediate texture to the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    // Only available when the adapter supports dual-source blending.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
87
/// Bind group layouts shared across pipelines; created once per device.
struct WgpuBindGroupLayouts {
    // Group 0: GlobalParams uniform + GammaParams uniform.
    globals: wgpu::BindGroupLayout,
    // Group 1 (untextured primitives): read-only storage buffer of instances.
    instances: wgpu::BindGroupLayout,
    // Group 1 (textured primitives): instances + atlas texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    // Group 1 for the surface pipeline: uniform + two textures + sampler.
    surfaces: wgpu::BindGroupLayout,
}
94
/// Shared GPU context reference, used to coordinate device recovery across multiple windows.
///
/// Starts as `None`; the first window to create a renderer initializes the
/// inner [`WgpuContext`], and subsequent windows reuse it (see [`WgpuRenderer::new`]).
pub type GpuContext = Rc<RefCell<Option<WgpuContext>>>;
97
/// GPU resources that must be dropped together during device recovery.
struct WgpuResources {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    // Linear sampler used for atlas and intermediate-texture reads.
    atlas_sampler: wgpu::Sampler,
    // Single buffer holding GlobalParams (offset 0), the path-pass
    // GlobalParams (path_globals_offset), and GammaParams (gamma_offset).
    globals_buffer: wgpu::Buffer,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    // Growable storage buffer holding all per-primitive instance data.
    instance_buffer: wgpu::Buffer,
    // Offscreen target for path rasterization; created lazily on first draw
    // and invalidated on resize (see ensure_intermediate_textures).
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    // MSAA color target for paths; None when path_sample_count <= 1.
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
}
115
/// A window renderer backed by wgpu: owns the window surface, pipelines, and
/// per-frame buffers, and draws GPUI `Scene`s into the surface.
pub struct WgpuRenderer {
    /// Shared GPU context for device recovery coordination (unused on WASM).
    #[allow(dead_code)]
    context: Option<GpuContext>,
    /// Compositor GPU hint for adapter selection (unused on WASM).
    #[allow(dead_code)]
    compositor_gpu: Option<CompositorGpuHint>,
    // `None` only while resources are torn down during device recovery;
    // accessors panic if used in that window (see `resources()`).
    resources: Option<WgpuResources>,
    surface_config: wgpu::SurfaceConfiguration,
    atlas: Arc<WgpuAtlas>,
    // Byte offsets of the path-pass globals and gamma params within
    // `globals_buffer`, aligned to min_uniform_buffer_offset_alignment.
    path_globals_offset: u64,
    gamma_offset: u64,
    instance_buffer_capacity: u64,
    max_buffer_size: u64,
    storage_buffer_alignment: u64,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    // Alpha modes picked at creation; swapped in by `update_transparency`.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    // Written by the device's uncaptured-error callback; checked each frame.
    last_error: Arc<Mutex<Option<String>>>,
    // Consecutive frames that reported a GPU error; panics past a threshold.
    failed_frame_count: u32,
    device_lost: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
141
142impl WgpuRenderer {
143 fn resources(&self) -> &WgpuResources {
144 self.resources
145 .as_ref()
146 .expect("GPU resources not available")
147 }
148
149 fn resources_mut(&mut self) -> &mut WgpuResources {
150 self.resources
151 .as_mut()
152 .expect("GPU resources not available")
153 }
154
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// The `gpu_context` is a shared reference that coordinates GPU context across
    /// multiple windows. The first window to create a renderer will initialize the
    /// context; subsequent windows will share it.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    pub fn new<W: HasWindowHandle + HasDisplayHandle>(
        gpu_context: GpuContext,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self> {
        // Convert handle-acquisition failures into errors rather than panicking.
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
        let display_handle = window
            .display_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            raw_display_handle: display_handle.as_raw(),
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        // (This immutable borrow of the RefCell ends before the borrow_mut below.)
        let instance = gpu_context
            .borrow()
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(WgpuContext::instance);

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        // First window initializes the shared context; later windows verify the
        // existing context can present to this surface.
        let mut ctx_ref = gpu_context.borrow_mut();
        let context = match ctx_ref.as_mut() {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => ctx_ref.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        let atlas = Arc::new(WgpuAtlas::new(
            Arc::clone(&context.device),
            Arc::clone(&context.queue),
        ));

        Self::new_internal(
            Some(Rc::clone(&gpu_context)),
            context,
            surface,
            config,
            compositor_gpu,
            atlas,
        )
    }
223
    /// Builds the renderer from an established context and surface: picks a
    /// surface format and alpha modes, configures the surface, creates all
    /// pipelines, buffers, and bind groups, and installs the uncaptured-error
    /// callback.
    fn new_internal(
        gpu_context: Option<GpuContext>,
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
        atlas: Arc<WgpuAtlas>,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer non-sRGB 8-bit formats; fall back to any non-sRGB format,
        // then to whatever the surface offers first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Choose the first supported alpha mode from an ordered preference
        // list, falling back to whatever the surface supports.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Both modes are resolved now so update_transparency can switch later
        // without re-querying capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to what the device can actually allocate.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                 Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            // max(1): wgpu rejects zero-sized surfaces (e.g. minimized windows).
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Pack three uniform blocks into one buffer: GlobalParams at 0, the
        // path-pass GlobalParams next, then GammaParams — each offset rounded
        // up to the device's uniform-buffer alignment.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        let max_buffer_size = device.limits().max_buffer_size;
        // 2 MiB initial capacity; the buffer grows on demand during draws.
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Main-pass globals: GlobalParams at offset 0, GammaParams at gamma_offset.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        // Path-pass globals: same buffer, GlobalParams slice at path_globals_offset.
        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured validation/OOM errors so draw() can count failed
        // frames instead of the process aborting inside wgpu.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        let resources = WgpuResources {
            device,
            queue,
            surface,
            pipelines,
            bind_group_layouts,
            atlas_sampler,
            globals_buffer,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
        };

        Ok(Self {
            context: gpu_context,
            compositor_gpu,
            resources: Some(resources),
            surface_config,
            atlas,
            path_globals_offset,
            gamma_offset,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            max_buffer_size,
            storage_buffer_alignment,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
            device_lost: context.device_lost_flag(),
        })
    }
451
    /// Creates the four bind group layouts shared by all pipelines.
    ///
    /// Binding indices here must match the `@group`/`@binding` declarations in
    /// `shaders.wgsl` — NOTE(review): verify against the shader when changing.
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        // Group 0 everywhere: GlobalParams (binding 0) + GammaParams (binding 1).
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer entry shared by the instance layouts below.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        // Group 1 for untextured primitives (quads, shadows, underlines, path raster).
        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // Group 1 for textured primitives: instances + sampled texture + sampler.
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // Group 1 for the surface pipeline: SurfaceParams uniform, two planes
        // of texture data, and a sampler.
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
575
    /// Compiles the shared WGSL module and builds one render pipeline per
    /// primitive kind.
    ///
    /// Blend state depends on `alpha_mode`, so this is re-run when the surface
    /// switches between opaque and transparent (see `update_transparency`).
    /// The subpixel-sprite pipeline is only built when the adapter supports
    /// dual-source blending.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });

        // Match the blend function to how the compositor interprets alpha.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // All pipelines share the same skeleton: no vertex buffers (vertices
        // are synthesized from instance data in the vertex shader), no depth,
        // no culling — only entry points, layouts, blend, and sample count vary.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Renders path geometry into the intermediate texture; always
        // premultiplied there, and possibly multisampled.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
        );

        // Composite blend for copying premultiplied path results onto the frame.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Subpixel text needs per-channel alpha, which requires dual-source
        // blending (Src1/OneMinusSrc1); skip the pipeline if unsupported.
        let subpixel_sprites = if dual_source_blending {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    // Color only: destination alpha is left untouched.
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
805
806 fn create_path_intermediate(
807 device: &wgpu::Device,
808 format: wgpu::TextureFormat,
809 width: u32,
810 height: u32,
811 ) -> (wgpu::Texture, wgpu::TextureView) {
812 let texture = device.create_texture(&wgpu::TextureDescriptor {
813 label: Some("path_intermediate"),
814 size: wgpu::Extent3d {
815 width: width.max(1),
816 height: height.max(1),
817 depth_or_array_layers: 1,
818 },
819 mip_level_count: 1,
820 sample_count: 1,
821 dimension: wgpu::TextureDimension::D2,
822 format,
823 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
824 view_formats: &[],
825 });
826 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
827 (texture, view)
828 }
829
830 fn create_msaa_if_needed(
831 device: &wgpu::Device,
832 format: wgpu::TextureFormat,
833 width: u32,
834 height: u32,
835 sample_count: u32,
836 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
837 if sample_count <= 1 {
838 return None;
839 }
840 let texture = device.create_texture(&wgpu::TextureDescriptor {
841 label: Some("path_msaa"),
842 size: wgpu::Extent3d {
843 width: width.max(1),
844 height: height.max(1),
845 depth_or_array_layers: 1,
846 },
847 mip_level_count: 1,
848 sample_count,
849 dimension: wgpu::TextureDimension::D2,
850 format,
851 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
852 view_formats: &[],
853 });
854 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
855 Some((texture, view))
856 }
857
    /// Resizes the surface to `size` (clamped to the device's maximum texture
    /// dimension) and invalidates the path intermediate textures.
    ///
    /// Order matters here: wait for in-flight GPU work, destroy old textures,
    /// reconfigure the surface, then clear the texture slots so they are
    /// lazily recreated on the next draw.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        // Skip the (expensive) reconfigure when the size hasn't changed.
        if width != self.surface_config.width || height != self.surface_config.height {
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                     Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            // max(1): wgpu rejects zero-sized surfaces (e.g. minimized windows).
            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            let surface_config = self.surface_config.clone();

            let resources = self.resources_mut();

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = resources.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = resources.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = resources.path_msaa_texture {
                texture.destroy();
            }

            resources
                .surface
                .configure(&resources.device, &surface_config);

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            resources.path_intermediate_texture = None;
            resources.path_intermediate_view = None;
            resources.path_msaa_texture = None;
            resources.path_msaa_view = None;
        }
    }
909
910 fn ensure_intermediate_textures(&mut self) {
911 if self.resources().path_intermediate_texture.is_some() {
912 return;
913 }
914
915 let format = self.surface_config.format;
916 let width = self.surface_config.width;
917 let height = self.surface_config.height;
918 let path_sample_count = self.rendering_params.path_sample_count;
919 let resources = self.resources_mut();
920
921 let (t, v) = Self::create_path_intermediate(&resources.device, format, width, height);
922 resources.path_intermediate_texture = Some(t);
923 resources.path_intermediate_view = Some(v);
924
925 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
926 &resources.device,
927 format,
928 width,
929 height,
930 path_sample_count,
931 )
932 .map(|(t, v)| (Some(t), Some(v)))
933 .unwrap_or((None, None));
934 resources.path_msaa_texture = path_msaa_texture;
935 resources.path_msaa_view = path_msaa_view;
936 }
937
938 pub fn update_transparency(&mut self, transparent: bool) {
939 let new_alpha_mode = if transparent {
940 self.transparent_alpha_mode
941 } else {
942 self.opaque_alpha_mode
943 };
944
945 if new_alpha_mode != self.surface_config.alpha_mode {
946 self.surface_config.alpha_mode = new_alpha_mode;
947 let surface_config = self.surface_config.clone();
948 let path_sample_count = self.rendering_params.path_sample_count;
949 let dual_source_blending = self.dual_source_blending;
950 let resources = self.resources_mut();
951 resources
952 .surface
953 .configure(&resources.device, &surface_config);
954 resources.pipelines = Self::create_pipelines(
955 &resources.device,
956 &resources.bind_group_layouts,
957 surface_config.format,
958 surface_config.alpha_mode,
959 path_sample_count,
960 dual_source_blending,
961 );
962 }
963 }
964
965 #[allow(dead_code)]
966 pub fn viewport_size(&self) -> Size<DevicePixels> {
967 Size {
968 width: DevicePixels(self.surface_config.width as i32),
969 height: DevicePixels(self.surface_config.height as i32),
970 }
971 }
972
    /// The sprite atlas this renderer samples glyphs and images from.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
976
977 pub fn gpu_specs(&self) -> GpuSpecs {
978 GpuSpecs {
979 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
980 device_name: self.adapter_info.name.clone(),
981 driver_name: self.adapter_info.driver.clone(),
982 driver_info: self.adapter_info.driver_info.clone(),
983 }
984 }
985
    /// The device's maximum 2D texture dimension, used to clamp surface sizes.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
989
990 pub fn draw(&mut self, scene: &Scene) {
991 let last_error = self.last_error.lock().unwrap().take();
992 if let Some(error) = last_error {
993 self.failed_frame_count += 1;
994 log::error!(
995 "GPU error during frame (failure {} of 20): {error}",
996 self.failed_frame_count
997 );
998 if self.failed_frame_count > 20 {
999 panic!("Too many consecutive GPU errors. Last error: {error}");
1000 }
1001 } else {
1002 self.failed_frame_count = 0;
1003 }
1004
1005 self.atlas.before_frame();
1006
1007 let texture_result = self.resources().surface.get_current_texture();
1008 let frame = match texture_result {
1009 Ok(frame) => frame,
1010 Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
1011 let surface_config = self.surface_config.clone();
1012 let resources = self.resources_mut();
1013 resources
1014 .surface
1015 .configure(&resources.device, &surface_config);
1016 return;
1017 }
1018 Err(e) => {
1019 *self.last_error.lock().unwrap() =
1020 Some(format!("Failed to acquire surface texture: {e}"));
1021 return;
1022 }
1023 };
1024
1025 // Now that we know the surface is healthy, ensure intermediate textures exist
1026 self.ensure_intermediate_textures();
1027
1028 let frame_view = frame
1029 .texture
1030 .create_view(&wgpu::TextureViewDescriptor::default());
1031
1032 let gamma_params = GammaParams {
1033 gamma_ratios: self.rendering_params.gamma_ratios,
1034 grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
1035 subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
1036 _pad: [0.0; 2],
1037 };
1038
1039 let globals = GlobalParams {
1040 viewport_size: [
1041 self.surface_config.width as f32,
1042 self.surface_config.height as f32,
1043 ],
1044 premultiplied_alpha: if self.surface_config.alpha_mode
1045 == wgpu::CompositeAlphaMode::PreMultiplied
1046 {
1047 1
1048 } else {
1049 0
1050 },
1051 pad: 0,
1052 };
1053
1054 let path_globals = GlobalParams {
1055 premultiplied_alpha: 0,
1056 ..globals
1057 };
1058
1059 {
1060 let resources = self.resources();
1061 resources.queue.write_buffer(
1062 &resources.globals_buffer,
1063 0,
1064 bytemuck::bytes_of(&globals),
1065 );
1066 resources.queue.write_buffer(
1067 &resources.globals_buffer,
1068 self.path_globals_offset,
1069 bytemuck::bytes_of(&path_globals),
1070 );
1071 resources.queue.write_buffer(
1072 &resources.globals_buffer,
1073 self.gamma_offset,
1074 bytemuck::bytes_of(&gamma_params),
1075 );
1076 }
1077
1078 loop {
1079 let mut instance_offset: u64 = 0;
1080 let mut overflow = false;
1081
1082 let mut encoder =
1083 self.resources()
1084 .device
1085 .create_command_encoder(&wgpu::CommandEncoderDescriptor {
1086 label: Some("main_encoder"),
1087 });
1088
1089 {
1090 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1091 label: Some("main_pass"),
1092 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1093 view: &frame_view,
1094 resolve_target: None,
1095 ops: wgpu::Operations {
1096 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1097 store: wgpu::StoreOp::Store,
1098 },
1099 depth_slice: None,
1100 })],
1101 depth_stencil_attachment: None,
1102 ..Default::default()
1103 });
1104
1105 for batch in scene.batches() {
1106 let ok = match batch {
1107 PrimitiveBatch::Quads(range) => {
1108 self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
1109 }
1110 PrimitiveBatch::Shadows(range) => self.draw_shadows(
1111 &scene.shadows[range],
1112 &mut instance_offset,
1113 &mut pass,
1114 ),
1115 PrimitiveBatch::Paths(range) => {
1116 let paths = &scene.paths[range];
1117 if paths.is_empty() {
1118 continue;
1119 }
1120
1121 drop(pass);
1122
1123 let did_draw = self.draw_paths_to_intermediate(
1124 &mut encoder,
1125 paths,
1126 &mut instance_offset,
1127 );
1128
1129 pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1130 label: Some("main_pass_continued"),
1131 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1132 view: &frame_view,
1133 resolve_target: None,
1134 ops: wgpu::Operations {
1135 load: wgpu::LoadOp::Load,
1136 store: wgpu::StoreOp::Store,
1137 },
1138 depth_slice: None,
1139 })],
1140 depth_stencil_attachment: None,
1141 ..Default::default()
1142 });
1143
1144 if did_draw {
1145 self.draw_paths_from_intermediate(
1146 paths,
1147 &mut instance_offset,
1148 &mut pass,
1149 )
1150 } else {
1151 false
1152 }
1153 }
1154 PrimitiveBatch::Underlines(range) => self.draw_underlines(
1155 &scene.underlines[range],
1156 &mut instance_offset,
1157 &mut pass,
1158 ),
1159 PrimitiveBatch::MonochromeSprites { texture_id, range } => self
1160 .draw_monochrome_sprites(
1161 &scene.monochrome_sprites[range],
1162 texture_id,
1163 &mut instance_offset,
1164 &mut pass,
1165 ),
1166 PrimitiveBatch::SubpixelSprites { texture_id, range } => self
1167 .draw_subpixel_sprites(
1168 &scene.subpixel_sprites[range],
1169 texture_id,
1170 &mut instance_offset,
1171 &mut pass,
1172 ),
1173 PrimitiveBatch::PolychromeSprites { texture_id, range } => self
1174 .draw_polychrome_sprites(
1175 &scene.polychrome_sprites[range],
1176 texture_id,
1177 &mut instance_offset,
1178 &mut pass,
1179 ),
1180 PrimitiveBatch::Surfaces(_surfaces) => {
1181 // Surfaces are macOS-only for video playback
1182 // Not implemented for Linux/wgpu
1183 true
1184 }
1185 };
1186 if !ok {
1187 overflow = true;
1188 break;
1189 }
1190 }
1191 }
1192
1193 if overflow {
1194 drop(encoder);
1195 if self.instance_buffer_capacity >= 256 * 1024 * 1024 {
1196 log::error!(
1197 "instance buffer size grew too large: {}",
1198 self.instance_buffer_capacity
1199 );
1200 frame.present();
1201 return;
1202 }
1203 self.grow_instance_buffer();
1204 continue;
1205 }
1206
1207 self.resources()
1208 .queue
1209 .submit(std::iter::once(encoder.finish()));
1210 frame.present();
1211 return;
1212 }
1213 }
1214
1215 fn draw_quads(
1216 &self,
1217 quads: &[Quad],
1218 instance_offset: &mut u64,
1219 pass: &mut wgpu::RenderPass<'_>,
1220 ) -> bool {
1221 let data = unsafe { Self::instance_bytes(quads) };
1222 self.draw_instances(
1223 data,
1224 quads.len() as u32,
1225 &self.resources().pipelines.quads,
1226 instance_offset,
1227 pass,
1228 )
1229 }
1230
1231 fn draw_shadows(
1232 &self,
1233 shadows: &[Shadow],
1234 instance_offset: &mut u64,
1235 pass: &mut wgpu::RenderPass<'_>,
1236 ) -> bool {
1237 let data = unsafe { Self::instance_bytes(shadows) };
1238 self.draw_instances(
1239 data,
1240 shadows.len() as u32,
1241 &self.resources().pipelines.shadows,
1242 instance_offset,
1243 pass,
1244 )
1245 }
1246
1247 fn draw_underlines(
1248 &self,
1249 underlines: &[Underline],
1250 instance_offset: &mut u64,
1251 pass: &mut wgpu::RenderPass<'_>,
1252 ) -> bool {
1253 let data = unsafe { Self::instance_bytes(underlines) };
1254 self.draw_instances(
1255 data,
1256 underlines.len() as u32,
1257 &self.resources().pipelines.underlines,
1258 instance_offset,
1259 pass,
1260 )
1261 }
1262
1263 fn draw_monochrome_sprites(
1264 &self,
1265 sprites: &[MonochromeSprite],
1266 texture_id: AtlasTextureId,
1267 instance_offset: &mut u64,
1268 pass: &mut wgpu::RenderPass<'_>,
1269 ) -> bool {
1270 let tex_info = self.atlas.get_texture_info(texture_id);
1271 let data = unsafe { Self::instance_bytes(sprites) };
1272 self.draw_instances_with_texture(
1273 data,
1274 sprites.len() as u32,
1275 &tex_info.view,
1276 &self.resources().pipelines.mono_sprites,
1277 instance_offset,
1278 pass,
1279 )
1280 }
1281
1282 fn draw_subpixel_sprites(
1283 &self,
1284 sprites: &[SubpixelSprite],
1285 texture_id: AtlasTextureId,
1286 instance_offset: &mut u64,
1287 pass: &mut wgpu::RenderPass<'_>,
1288 ) -> bool {
1289 let tex_info = self.atlas.get_texture_info(texture_id);
1290 let data = unsafe { Self::instance_bytes(sprites) };
1291 let resources = self.resources();
1292 let pipeline = resources
1293 .pipelines
1294 .subpixel_sprites
1295 .as_ref()
1296 .unwrap_or(&resources.pipelines.mono_sprites);
1297 self.draw_instances_with_texture(
1298 data,
1299 sprites.len() as u32,
1300 &tex_info.view,
1301 pipeline,
1302 instance_offset,
1303 pass,
1304 )
1305 }
1306
1307 fn draw_polychrome_sprites(
1308 &self,
1309 sprites: &[PolychromeSprite],
1310 texture_id: AtlasTextureId,
1311 instance_offset: &mut u64,
1312 pass: &mut wgpu::RenderPass<'_>,
1313 ) -> bool {
1314 let tex_info = self.atlas.get_texture_info(texture_id);
1315 let data = unsafe { Self::instance_bytes(sprites) };
1316 self.draw_instances_with_texture(
1317 data,
1318 sprites.len() as u32,
1319 &tex_info.view,
1320 &self.resources().pipelines.poly_sprites,
1321 instance_offset,
1322 pass,
1323 )
1324 }
1325
1326 fn draw_instances(
1327 &self,
1328 data: &[u8],
1329 instance_count: u32,
1330 pipeline: &wgpu::RenderPipeline,
1331 instance_offset: &mut u64,
1332 pass: &mut wgpu::RenderPass<'_>,
1333 ) -> bool {
1334 if instance_count == 0 {
1335 return true;
1336 }
1337 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1338 return false;
1339 };
1340 let resources = self.resources();
1341 let bind_group = resources
1342 .device
1343 .create_bind_group(&wgpu::BindGroupDescriptor {
1344 label: None,
1345 layout: &resources.bind_group_layouts.instances,
1346 entries: &[wgpu::BindGroupEntry {
1347 binding: 0,
1348 resource: self.instance_binding(offset, size),
1349 }],
1350 });
1351 pass.set_pipeline(pipeline);
1352 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1353 pass.set_bind_group(1, &bind_group, &[]);
1354 pass.draw(0..4, 0..instance_count);
1355 true
1356 }
1357
1358 fn draw_instances_with_texture(
1359 &self,
1360 data: &[u8],
1361 instance_count: u32,
1362 texture_view: &wgpu::TextureView,
1363 pipeline: &wgpu::RenderPipeline,
1364 instance_offset: &mut u64,
1365 pass: &mut wgpu::RenderPass<'_>,
1366 ) -> bool {
1367 if instance_count == 0 {
1368 return true;
1369 }
1370 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1371 return false;
1372 };
1373 let resources = self.resources();
1374 let bind_group = resources
1375 .device
1376 .create_bind_group(&wgpu::BindGroupDescriptor {
1377 label: None,
1378 layout: &resources.bind_group_layouts.instances_with_texture,
1379 entries: &[
1380 wgpu::BindGroupEntry {
1381 binding: 0,
1382 resource: self.instance_binding(offset, size),
1383 },
1384 wgpu::BindGroupEntry {
1385 binding: 1,
1386 resource: wgpu::BindingResource::TextureView(texture_view),
1387 },
1388 wgpu::BindGroupEntry {
1389 binding: 2,
1390 resource: wgpu::BindingResource::Sampler(&resources.atlas_sampler),
1391 },
1392 ],
1393 });
1394 pass.set_pipeline(pipeline);
1395 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1396 pass.set_bind_group(1, &bind_group, &[]);
1397 pass.draw(0..4, 0..instance_count);
1398 true
1399 }
1400
    /// Reinterprets a slice of instance structs as raw bytes for upload to the
    /// GPU instance buffer.
    ///
    /// # Safety
    ///
    /// The resulting byte view exposes `T`'s exact in-memory representation,
    /// including any padding bytes. Callers pass #[repr(C)] instance structs
    /// intended for GPU consumption; any padding in `T` would be uploaded as
    /// uninitialized bytes — assumes all instance types used here are
    /// padding-free, TODO confirm against the shader-side layouts.
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        unsafe {
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1409
    /// Composites already-rasterized paths from the intermediate texture onto
    /// the main render target.
    ///
    /// Returns false only when the shared instance buffer overflowed; a
    /// missing intermediate texture is treated as a successful no-op.
    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
        pass: &mut wgpu::RenderPass<'_>,
    ) -> bool {
        // Caller guarantees `paths` is non-empty (empty batches are skipped
        // before this is reached), so indexing 0 is safe here.
        let first_path = &paths[0];
        // If the first and last path share the same draw order — presumably
        // implying the whole batch does, since batches appear to be ordered;
        // TODO confirm against the scene batching code — each path is blitted
        // with its own tight bounds. Otherwise the batch spans multiple draw
        // orders and is composited as one sprite covering the union of all
        // clipped bounds.
        let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
        {
            paths
                .iter()
                .map(|p| PathSprite {
                    bounds: p.clipped_bounds(),
                })
                .collect()
        } else {
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            vec![PathSprite { bounds }]
        };

        let resources = self.resources();
        // No intermediate texture means there is nothing to composite from;
        // report success so the frame continues.
        let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
            return true;
        };

        let sprite_data = unsafe { Self::instance_bytes(&sprites) };
        self.draw_instances_with_texture(
            sprite_data,
            sprites.len() as u32,
            path_intermediate_view,
            &resources.pipelines.paths,
            instance_offset,
            pass,
        )
    }
1448
    /// Rasterizes a batch of paths into the intermediate offscreen texture,
    /// going through an MSAA attachment (resolved on store) when one exists.
    ///
    /// Returns false only when the shared instance buffer overflowed and the
    /// frame must be retried with a larger buffer; an empty vertex set or a
    /// missing intermediate texture is treated as a successful no-op.
    fn draw_paths_to_intermediate(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
    ) -> bool {
        // Flatten all paths into a single vertex stream; each vertex carries
        // its path's color and clipped bounds so one draw call covers the
        // whole batch.
        let mut vertices = Vec::new();
        for path in paths {
            let bounds = path.clipped_bounds();
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds,
            }));
        }

        if vertices.is_empty() {
            return true;
        }

        // SAFETY contract of instance_bytes: assumes PathRasterizationVertex
        // has no padding bytes — TODO confirm.
        let vertex_data = unsafe { Self::instance_bytes(&vertices) };
        let Some((vertex_offset, vertex_size)) =
            self.write_to_instance_buffer(instance_offset, vertex_data)
        else {
            return false;
        };

        let resources = self.resources();
        let data_bind_group = resources
            .device
            .create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("path_rasterization_bind_group"),
                layout: &resources.bind_group_layouts.instances,
                entries: &[wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.instance_binding(vertex_offset, vertex_size),
                }],
            });

        // NOTE(review): instance-buffer space was already consumed above even
        // though we bail out here when the intermediate texture is absent —
        // harmless (the space is merely wasted) but worth confirming.
        let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
            return true;
        };

        // With MSAA, render into the multisampled attachment and resolve into
        // the intermediate texture; otherwise render into it directly.
        let (target_view, resolve_target) = if let Some(ref msaa_view) = resources.path_msaa_view {
            (msaa_view, Some(path_intermediate_view))
        } else {
            (path_intermediate_view, None)
        };

        {
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("path_rasterization_pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target_view,
                    resolve_target,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                        store: wgpu::StoreOp::Store,
                    },
                    depth_slice: None,
                })],
                depth_stencil_attachment: None,
                ..Default::default()
            });

            pass.set_pipeline(&resources.pipelines.path_rasterization);
            pass.set_bind_group(0, &resources.path_globals_bind_group, &[]);
            pass.set_bind_group(1, &data_bind_group, &[]);
            // Non-instanced draw: one vertex per flattened path vertex.
            pass.draw(0..vertices.len() as u32, 0..1);
        }

        true
    }
1523
1524 fn grow_instance_buffer(&mut self) {
1525 let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
1526 log::info!("increased instance buffer size to {}", new_capacity);
1527 let resources = self.resources_mut();
1528 resources.instance_buffer = resources.device.create_buffer(&wgpu::BufferDescriptor {
1529 label: Some("instance_buffer"),
1530 size: new_capacity,
1531 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1532 mapped_at_creation: false,
1533 });
1534 self.instance_buffer_capacity = new_capacity;
1535 }
1536
1537 fn write_to_instance_buffer(
1538 &self,
1539 instance_offset: &mut u64,
1540 data: &[u8],
1541 ) -> Option<(u64, NonZeroU64)> {
1542 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1543 let size = (data.len() as u64).max(16);
1544 if offset + size > self.instance_buffer_capacity {
1545 return None;
1546 }
1547 let resources = self.resources();
1548 resources
1549 .queue
1550 .write_buffer(&resources.instance_buffer, offset, data);
1551 *instance_offset = offset + size;
1552 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1553 }
1554
1555 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1556 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1557 buffer: &self.resources().instance_buffer,
1558 offset,
1559 size: Some(size),
1560 })
1561 }
1562
    /// Releases renderer resources.
    ///
    /// Intentionally a no-op: every wgpu object held by this renderer is
    /// reference-counted and freed when its owner is dropped. The method
    /// exists to match the interface of other platform renderer backends.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1566
    /// Returns true if the GPU device was lost and recovery is needed.
    ///
    /// The flag is set asynchronously (SeqCst) from wherever the device-lost
    /// callback fires; callers should follow up with `recover`.
    pub fn device_lost(&self) -> bool {
        self.device_lost.load(std::sync::atomic::Ordering::SeqCst)
    }
1571
    /// Recovers from a lost GPU device by recreating the renderer with a new context.
    ///
    /// Call this after detecting `device_lost()` returns true.
    ///
    /// This method coordinates recovery across multiple windows:
    /// - The first window to call this will recreate the shared context
    /// - Subsequent windows will adopt the already-recovered context
    ///
    /// # Errors
    ///
    /// Returns an error when surface creation, context creation, or renderer
    /// re-initialization fails.
    #[cfg(not(target_family = "wasm"))]
    pub fn recover(
        &mut self,
        raw_display_handle: raw_window_handle::RawDisplayHandle,
        raw_window_handle: raw_window_handle::RawWindowHandle,
    ) -> anyhow::Result<()> {
        let gpu_context = self.context.as_ref().expect("recover requires gpu_context");

        // Check if another window already recovered the context
        let needs_new_context = gpu_context
            .borrow()
            .as_ref()
            .is_none_or(|ctx| ctx.device_lost());

        let surface = if needs_new_context {
            log::warn!("GPU device lost, recreating context...");

            // Drop old resources to release Arc<Device>/Arc<Queue> and GPU resources
            self.resources = None;
            *gpu_context.borrow_mut() = None;

            // Wait for GPU driver to stabilize (350ms copied from windows :shrug:)
            std::thread::sleep(std::time::Duration::from_millis(350));

            let instance = WgpuContext::instance();
            let surface = create_surface(&instance, raw_display_handle, raw_window_handle)?;
            let new_context = WgpuContext::new(instance, &surface, self.compositor_gpu)?;
            *gpu_context.borrow_mut() = Some(new_context);
            surface
        } else {
            // Another window already rebuilt the context; just make a fresh
            // surface against the existing instance.
            let ctx_ref = gpu_context.borrow();
            let instance = &ctx_ref.as_ref().unwrap().instance;
            create_surface(instance, raw_display_handle, raw_window_handle)?
        };

        // Rebuild the surface config from the old one so the recovered window
        // keeps its size and transparency.
        let config = WgpuSurfaceConfig {
            size: gpui::Size {
                width: gpui::DevicePixels(self.surface_config.width as i32),
                height: gpui::DevicePixels(self.surface_config.height as i32),
            },
            transparent: self.surface_config.alpha_mode != wgpu::CompositeAlphaMode::Opaque,
        };
        // Clone the Rc so the borrow below is not tied to `self.context`,
        // which is replaced by the `*self = ...` assignment.
        let gpu_context = Rc::clone(gpu_context);
        let ctx_ref = gpu_context.borrow();
        let context = ctx_ref.as_ref().expect("context should exist");

        // Drop any remaining per-window resources before handing the atlas
        // the new device/queue.
        self.resources = None;
        self.atlas
            .handle_device_lost(Arc::clone(&context.device), Arc::clone(&context.queue));

        // Rebuild the renderer in place; the atlas is shared and survives.
        *self = Self::new_internal(
            Some(gpu_context.clone()),
            context,
            surface,
            config,
            self.compositor_gpu,
            self.atlas.clone(),
        )?;

        log::info!("GPU recovery complete");
        Ok(())
    }
1641}
1642
1643#[cfg(not(target_family = "wasm"))]
1644fn create_surface(
1645 instance: &wgpu::Instance,
1646 raw_display_handle: raw_window_handle::RawDisplayHandle,
1647 raw_window_handle: raw_window_handle::RawWindowHandle,
1648) -> anyhow::Result<wgpu::Surface<'static>> {
1649 unsafe {
1650 instance
1651 .create_surface_unsafe(wgpu::SurfaceTargetUnsafe::RawHandle {
1652 raw_display_handle,
1653 raw_window_handle,
1654 })
1655 .map_err(|e| anyhow::anyhow!("{e}"))
1656 }
1657}
1658
/// Rendering tunables resolved once when the renderer is constructed.
struct RenderingParameters {
    /// MSAA sample count used for path rasterization (4, 2, or 1, depending
    /// on what the surface format supports).
    path_sample_count: u32,
    /// Gamma-correction ratios derived from the `ZED_FONTS_GAMMA` setting.
    gamma_ratios: [f32; 4],
    /// Extra contrast applied to grayscale-antialiased text (clamped >= 0).
    grayscale_enhanced_contrast: f32,
    /// Extra contrast applied to subpixel-antialiased text (clamped >= 0).
    subpixel_enhanced_contrast: f32,
}
1665
1666impl RenderingParameters {
1667 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1668 use std::env;
1669
1670 let format_features = adapter.get_texture_format_features(surface_format);
1671 let path_sample_count = [4, 2, 1]
1672 .into_iter()
1673 .find(|&n| format_features.flags.sample_count_supported(n))
1674 .unwrap_or(1);
1675
1676 let gamma = env::var("ZED_FONTS_GAMMA")
1677 .ok()
1678 .and_then(|v| v.parse().ok())
1679 .unwrap_or(1.8_f32)
1680 .clamp(1.0, 2.2);
1681 let gamma_ratios = get_gamma_correction_ratios(gamma);
1682
1683 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1684 .ok()
1685 .and_then(|v| v.parse().ok())
1686 .unwrap_or(1.0_f32)
1687 .max(0.0);
1688
1689 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1690 .ok()
1691 .and_then(|v| v.parse().ok())
1692 .unwrap_or(0.5_f32)
1693 .max(0.0);
1694
1695 Self {
1696 path_sample_count,
1697 gamma_ratios,
1698 grayscale_enhanced_contrast,
1699 subpixel_enhanced_contrast,
1700 }
1701 }
1702}