1use crate::{CompositorGpuHint, WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use log::warn;
9#[cfg(not(target_family = "wasm"))]
10use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
11use std::cell::RefCell;
12use std::num::NonZeroU64;
13use std::rc::Rc;
14use std::sync::{Arc, Mutex};
15
/// Per-frame uniform data shared by the render pipelines (group 0, binding 0).
///
/// Written to `globals_buffer` at offset 0 (and again at `path_globals_offset`
/// with `premultiplied_alpha` forced to 0 for path rasterization).
/// Layout is expected to mirror the corresponding struct in `shaders.wgsl` —
/// NOTE(review): confirm against the WGSL source.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    // Viewport size in device pixels (width, height), taken from the surface config.
    viewport_size: [f32; 2],
    // 1 when the surface alpha mode is `PreMultiplied`, otherwise 0.
    premultiplied_alpha: u32,
    // Explicit padding so the struct size is a multiple of 16 bytes.
    pad: u32,
}
23
/// Plain-old-data mirror of `Bounds<ScaledPixels>`, flattened to raw `f32`
/// pairs so it can be uploaded to the GPU via `bytemuck`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    // Top-left corner (x, y) in scaled pixels.
    origin: [f32; 2],
    // Extent (width, height) in scaled pixels.
    size: [f32; 2],
}
30
31impl From<Bounds<ScaledPixels>> for PodBounds {
32 fn from(bounds: Bounds<ScaledPixels>) -> Self {
33 Self {
34 origin: [bounds.origin.x.0, bounds.origin.y.0],
35 size: [bounds.size.width.0, bounds.size.height.0],
36 }
37 }
38}
39
/// Per-surface uniform data for the `surfaces` pipeline (bound at binding 0 of
/// the `surfaces` bind group layout).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    // Destination bounds of the surface quad, in scaled pixels.
    bounds: PodBounds,
    // Clipping rectangle applied to the surface, in scaled pixels.
    content_mask: PodBounds,
}
46
/// Text-rendering gamma/contrast parameters (group 0, binding 1), written to
/// `globals_buffer` at `gamma_offset`. Values come from `RenderingParameters`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    // Ratios produced by `get_gamma_correction_ratios` for glyph blending.
    gamma_ratios: [f32; 4],
    // Extra contrast applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    // Extra contrast applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    // Padding to a 16-byte multiple for uniform-buffer layout.
    _pad: [f32; 2],
}
55
/// Instance data for drawing a rasterized path sprite: the screen-space bounds
/// the path's intermediate texture is sampled into.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
61
/// Vertex data consumed by the `path_rasterization` pipeline.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    // Vertex position in scaled pixels.
    xy_position: Point<ScaledPixels>,
    // Texture-space (s, t) coordinate for coverage evaluation.
    st_position: Point<f32>,
    // Fill color/background for the path.
    color: Background,
    // Bounds of the whole path, used for clipping/placement.
    bounds: Bounds<ScaledPixels>,
}
70
/// Initial configuration requested for a renderer's window surface.
pub struct WgpuSurfaceConfig {
    /// Requested drawable size in device pixels (clamped to the adapter's
    /// maximum texture dimension at construction time).
    pub size: Size<DevicePixels>,
    /// Whether the surface should use a transparency-capable alpha mode.
    pub transparent: bool,
}
75
/// One render pipeline per primitive kind drawn by the renderer. Rebuilt when
/// the surface alpha mode changes (see `update_transparency`).
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    // Renders path geometry into the intermediate (possibly MSAA) texture.
    path_rasterization: wgpu::RenderPipeline,
    // Composites the rasterized path texture onto the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    // Present only when the adapter supports dual-source blending.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
88
/// Bind group layouts shared by all pipelines; created once per device.
struct WgpuBindGroupLayouts {
    // Group 0: `GlobalParams` uniform + `GammaParams` uniform.
    globals: wgpu::BindGroupLayout,
    // Group 1 (instance-only pipelines): read-only storage buffer.
    instances: wgpu::BindGroupLayout,
    // Group 1 (textured pipelines): storage buffer + texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    // Group 1 (surfaces pipeline): uniform + two textures + sampler.
    surfaces: wgpu::BindGroupLayout,
}
95
/// Shared GPU context reference, used to coordinate device recovery across multiple windows.
/// The first window initializes the inner `WgpuContext`; later windows reuse it.
pub type GpuContext = Rc<RefCell<Option<WgpuContext>>>;
98
/// GPU resources that must be dropped together during device recovery.
struct WgpuResources {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    // Linear-filtering sampler shared by all atlas texture bindings.
    atlas_sampler: wgpu::Sampler,
    // Single uniform buffer holding GlobalParams (offset 0), path GlobalParams
    // (path_globals_offset) and GammaParams (gamma_offset).
    globals_buffer: wgpu::Buffer,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    // Growable storage buffer holding per-primitive instance data.
    instance_buffer: wgpu::Buffer,
    // Intermediate path targets are created lazily on first draw and
    // invalidated on resize (see ensure_intermediate_textures).
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
}
116
/// A wgpu-backed renderer for a single window surface.
pub struct WgpuRenderer {
    /// Shared GPU context for device recovery coordination (unused on WASM).
    #[allow(dead_code)]
    context: Option<GpuContext>,
    /// Compositor GPU hint for adapter selection (unused on WASM).
    #[allow(dead_code)]
    compositor_gpu: Option<CompositorGpuHint>,
    // `None` only while resources have been torn down (e.g. device recovery);
    // accessors panic in that state.
    resources: Option<WgpuResources>,
    surface_config: wgpu::SurfaceConfiguration,
    atlas: Arc<WgpuAtlas>,
    // Byte offsets into `globals_buffer` for the path globals and gamma params,
    // aligned to min_uniform_buffer_offset_alignment.
    path_globals_offset: u64,
    gamma_offset: u64,
    // Current size of `instance_buffer`, bounded by `max_buffer_size`.
    instance_buffer_capacity: u64,
    max_buffer_size: u64,
    storage_buffer_alignment: u64,
    rendering_params: RenderingParameters,
    // Whether the subpixel-sprite pipeline (dual-source blending) is available.
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    // Alpha modes chosen at construction for transparent/opaque windows.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    // Last uncaptured device error, recorded by the error callback and
    // consumed at the start of each draw.
    last_error: Arc<Mutex<Option<String>>>,
    // Consecutive frames that reported a GPU error; panics past a threshold.
    failed_frame_count: u32,
    device_lost: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
142
143impl WgpuRenderer {
144 fn resources(&self) -> &WgpuResources {
145 self.resources
146 .as_ref()
147 .expect("GPU resources not available")
148 }
149
150 fn resources_mut(&mut self) -> &mut WgpuResources {
151 self.resources
152 .as_mut()
153 .expect("GPU resources not available")
154 }
155
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// The `gpu_context` is a shared reference that coordinates GPU context across
    /// multiple windows. The first window to create a renderer will initialize the
    /// context; subsequent windows will share it.
    ///
    /// # Errors
    /// Returns an error if the window handle or surface cannot be obtained, or if
    /// an existing shared context is incompatible with the new surface.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    #[cfg(not(target_family = "wasm"))]
    pub fn new<W>(
        gpu_context: GpuContext,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            // Fall back to the display handle already provided via InstanceDescriptor::display.
            raw_display_handle: None,
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        // The immutable borrow here is a temporary that ends before the
        // `borrow_mut` below, so the RefCell is not double-borrowed.
        let instance = gpu_context
            .borrow()
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(|| WgpuContext::instance(Box::new(window.clone())));

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        // First window initializes the shared context; later windows only verify
        // the existing context can drive this surface.
        let mut ctx_ref = gpu_context.borrow_mut();
        let context = match ctx_ref.as_mut() {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => ctx_ref.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        let atlas = Arc::new(WgpuAtlas::new(
            Arc::clone(&context.device),
            Arc::clone(&context.queue),
        ));

        Self::new_internal(
            Some(Rc::clone(&gpu_context)),
            context,
            surface,
            config,
            compositor_gpu,
            atlas,
        )
    }
226
    /// Creates a new WgpuRenderer targeting an HTML canvas (WASM only).
    ///
    /// Unlike the native path, the context is provided directly and no shared
    /// `GpuContext` or compositor hint is used.
    #[cfg(target_family = "wasm")]
    pub fn new_from_canvas(
        context: &WgpuContext,
        canvas: &web_sys::HtmlCanvasElement,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface = context
            .instance
            .create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))
            .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?;

        let atlas = Arc::new(WgpuAtlas::new(
            Arc::clone(&context.device),
            Arc::clone(&context.queue),
        ));

        Self::new_internal(None, context, surface, config, None, atlas)
    }
245
    /// Shared construction path for the native and WASM entry points.
    ///
    /// Chooses a surface format and alpha mode, configures the surface, creates
    /// bind group layouts, pipelines, the sampler, and the uniform/instance
    /// buffers. Intermediate path textures are deliberately deferred to the
    /// first draw (see `ensure_intermediate_textures`).
    fn new_internal(
        gpu_context: Option<GpuContext>,
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
        atlas: Arc<WgpuAtlas>,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer 8-bit non-sRGB formats; fall back to any non-sRGB format, then
        // to whatever the surface offers first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first supported alpha mode from an ordered preference list,
        // falling back to whatever the surface supports.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Both modes are resolved up front so update_transparency can switch
        // between them without re-querying surface capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // NOTE(review): a negative DevicePixels value would wrap via `as u32`
        // and be clamped to max_texture_size rather than 1 — confirm callers
        // never pass negative sizes.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                 Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Lay out three uniform regions in one buffer: GlobalParams at 0,
        // path GlobalParams at path_globals_offset, GammaParams at gamma_offset.
        // Offsets are aligned to the device's uniform-offset requirement.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let max_buffer_size = device.limits().max_buffer_size;
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        // Same layout as globals_bind_group, but binding 0 points at the
        // path-specific GlobalParams region.
        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured device errors; draw() inspects this each frame and
        // panics after too many consecutive failures.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        let resources = WgpuResources {
            device,
            queue,
            surface,
            pipelines,
            bind_group_layouts,
            atlas_sampler,
            globals_buffer,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
        };

        Ok(Self {
            context: gpu_context,
            compositor_gpu,
            resources: Some(resources),
            surface_config,
            atlas,
            path_globals_offset,
            gamma_offset,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            max_buffer_size,
            storage_buffer_alignment,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
            device_lost: context.device_lost_flag(),
        })
    }
473
    /// Creates the four bind group layouts shared by all pipelines.
    ///
    /// Layout contents must stay in sync with the bindings declared in
    /// `shaders.wgsl` — NOTE(review): confirm binding indices against the WGSL.
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        // Group 0 everywhere: GlobalParams (binding 0) + GammaParams (binding 1).
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer entry used for per-primitive instance data.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        // Group 1 for untextured pipelines (quads, shadows, underlines, ...).
        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // Group 1 for textured pipelines: instances + atlas texture + sampler.
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // Group 1 for the surfaces pipeline: SurfaceParams uniform, two planar
        // textures, and a sampler.
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
597
    /// Builds every render pipeline from the embedded WGSL shaders.
    ///
    /// The frame blend state depends on `alpha_mode`, which is why these are
    /// rebuilt by `update_transparency`. The subpixel-sprite pipeline is only
    /// created when the device supports dual-source blending.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let base_shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(base_shader_source)),
        });

        // The subpixel shader needs the dual_source_blending WGSL extension, so
        // it lives in a separate module compiled only when supported.
        let subpixel_shader_source = include_str!("shaders_subpixel.wgsl");
        let subpixel_shader_module = if dual_source_blending {
            let combined = format!(
                "enable dual_source_blending;\n{base_shader_source}\n{subpixel_shader_source}"
            );
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("gpui_subpixel_shaders"),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Owned(combined)),
            }))
        } else {
            None
        };

        // Match the blend math to how the compositor interprets our alpha.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Shared pipeline builder: all pipelines use two bind groups
        // (globals + per-pipeline data), no vertex buffers, and no depth.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32,
                               module: &wgpu::ShaderModule| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[Some(globals_layout), Some(data_layout)],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Renders into the intermediate texture (always premultiplied, possibly
        // multisampled), not the frame — hence its own target/sample count.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
            &shader_module,
        );

        // Composites the premultiplied intermediate texture onto the frame.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
            &shader_module,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Subpixel text uses per-channel (dual-source) blending; alpha writes
        // are masked off since coverage is carried in the second source color.
        let subpixel_sprites = if let Some(subpixel_module) = &subpixel_shader_module {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
                subpixel_module,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
            &shader_module,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
850
851 fn create_path_intermediate(
852 device: &wgpu::Device,
853 format: wgpu::TextureFormat,
854 width: u32,
855 height: u32,
856 ) -> (wgpu::Texture, wgpu::TextureView) {
857 let texture = device.create_texture(&wgpu::TextureDescriptor {
858 label: Some("path_intermediate"),
859 size: wgpu::Extent3d {
860 width: width.max(1),
861 height: height.max(1),
862 depth_or_array_layers: 1,
863 },
864 mip_level_count: 1,
865 sample_count: 1,
866 dimension: wgpu::TextureDimension::D2,
867 format,
868 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
869 view_formats: &[],
870 });
871 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
872 (texture, view)
873 }
874
875 fn create_msaa_if_needed(
876 device: &wgpu::Device,
877 format: wgpu::TextureFormat,
878 width: u32,
879 height: u32,
880 sample_count: u32,
881 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
882 if sample_count <= 1 {
883 return None;
884 }
885 let texture = device.create_texture(&wgpu::TextureDescriptor {
886 label: Some("path_msaa"),
887 size: wgpu::Extent3d {
888 width: width.max(1),
889 height: height.max(1),
890 depth_or_array_layers: 1,
891 },
892 mip_level_count: 1,
893 sample_count,
894 dimension: wgpu::TextureDimension::D2,
895 format,
896 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
897 view_formats: &[],
898 });
899 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
900 Some((texture, view))
901 }
902
    /// Reconfigures the surface for a new drawable size.
    ///
    /// No-op when the size is unchanged. The order below is deliberate:
    /// wait for in-flight GPU work, destroy the old intermediate textures,
    /// reconfigure the surface, then leave the intermediate textures `None`
    /// so they are lazily recreated on the next healthy draw.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        // NOTE(review): negative DevicePixels would wrap via `as u32` — assumes
        // callers pass non-negative sizes.
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                     Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            let surface_config = self.surface_config.clone();

            let resources = self.resources_mut();

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = resources.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = resources.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = resources.path_msaa_texture {
                texture.destroy();
            }

            resources
                .surface
                .configure(&resources.device, &surface_config);

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            resources.path_intermediate_texture = None;
            resources.path_intermediate_view = None;
            resources.path_msaa_texture = None;
            resources.path_msaa_view = None;
        }
    }
954
955 fn ensure_intermediate_textures(&mut self) {
956 if self.resources().path_intermediate_texture.is_some() {
957 return;
958 }
959
960 let format = self.surface_config.format;
961 let width = self.surface_config.width;
962 let height = self.surface_config.height;
963 let path_sample_count = self.rendering_params.path_sample_count;
964 let resources = self.resources_mut();
965
966 let (t, v) = Self::create_path_intermediate(&resources.device, format, width, height);
967 resources.path_intermediate_texture = Some(t);
968 resources.path_intermediate_view = Some(v);
969
970 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
971 &resources.device,
972 format,
973 width,
974 height,
975 path_sample_count,
976 )
977 .map(|(t, v)| (Some(t), Some(v)))
978 .unwrap_or((None, None));
979 resources.path_msaa_texture = path_msaa_texture;
980 resources.path_msaa_view = path_msaa_view;
981 }
982
    /// Switches the surface between the transparent and opaque alpha modes
    /// chosen at construction.
    ///
    /// When the mode actually changes, the surface is reconfigured and all
    /// pipelines are rebuilt, since their blend state depends on the alpha mode
    /// (see `create_pipelines`).
    pub fn update_transparency(&mut self, transparent: bool) {
        let new_alpha_mode = if transparent {
            self.transparent_alpha_mode
        } else {
            self.opaque_alpha_mode
        };

        if new_alpha_mode != self.surface_config.alpha_mode {
            self.surface_config.alpha_mode = new_alpha_mode;
            let surface_config = self.surface_config.clone();
            let path_sample_count = self.rendering_params.path_sample_count;
            let dual_source_blending = self.dual_source_blending;
            let resources = self.resources_mut();
            resources
                .surface
                .configure(&resources.device, &surface_config);
            resources.pipelines = Self::create_pipelines(
                &resources.device,
                &resources.bind_group_layouts,
                surface_config.format,
                surface_config.alpha_mode,
                path_sample_count,
                dual_source_blending,
            );
        }
    }
1009
1010 #[allow(dead_code)]
1011 pub fn viewport_size(&self) -> Size<DevicePixels> {
1012 Size {
1013 width: DevicePixels(self.surface_config.width as i32),
1014 height: DevicePixels(self.surface_config.height as i32),
1015 }
1016 }
1017
    /// Returns the sprite atlas shared with this renderer's device/queue.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
1021
    /// Whether the device supports dual-source blending (and therefore the
    /// subpixel-sprite pipeline).
    pub fn supports_dual_source_blending(&self) -> bool {
        self.dual_source_blending
    }
1025
1026 pub fn gpu_specs(&self) -> GpuSpecs {
1027 GpuSpecs {
1028 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
1029 device_name: self.adapter_info.name.clone(),
1030 driver_name: self.adapter_info.driver.clone(),
1031 driver_info: self.adapter_info.driver_info.clone(),
1032 }
1033 }
1034
    /// Returns the device's maximum 2D texture dimension, captured at creation.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
1038
    /// Renders one frame of `scene` to the swapchain surface.
    ///
    /// Recording happens inside a retry loop: when the shared instance buffer
    /// overflows mid-frame, the partially recorded encoder is discarded, the
    /// buffer is grown, and the whole frame is recorded from scratch again.
    pub fn draw(&mut self, scene: &Scene) {
        // GPU errors are reported asynchronously into `last_error`; inspect the
        // previous frame's outcome before recording a new one.
        let last_error = self.last_error.lock().unwrap().take();
        if let Some(error) = last_error {
            self.failed_frame_count += 1;
            log::error!(
                "GPU error during frame (failure {} of 20): {error}",
                self.failed_frame_count
            );
            if self.failed_frame_count > 20 {
                panic!("Too many consecutive GPU errors. Last error: {error}");
            }
        } else {
            // A clean frame resets the consecutive-failure counter.
            self.failed_frame_count = 0;
        }

        // Per-frame atlas bookkeeping before any texture lookups below.
        self.atlas.before_frame();

        let frame = match self.resources().surface.get_current_texture() {
            wgpu::CurrentSurfaceTexture::Success(frame) => frame,
            // Still renderable this frame, but reconfigure so the next acquire
            // matches the surface again.
            wgpu::CurrentSurfaceTexture::Suboptimal(frame) => {
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                frame
            }
            // Unusable swapchain: reconfigure and skip this frame entirely.
            wgpu::CurrentSurfaceTexture::Lost | wgpu::CurrentSurfaceTexture::Outdated => {
                let surface_config = self.surface_config.clone();
                let resources = self.resources_mut();
                resources
                    .surface
                    .configure(&resources.device, &surface_config);
                return;
            }
            // Transient conditions: drop the frame without reconfiguring.
            wgpu::CurrentSurfaceTexture::Timeout | wgpu::CurrentSurfaceTexture::Occluded => {
                return;
            }
            // Record the error so the next `draw` call counts it as a failure.
            wgpu::CurrentSurfaceTexture::Validation => {
                *self.last_error.lock().unwrap() =
                    Some("Surface texture validation error".to_string());
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        // Text gamma/contrast uniforms, resolved once at startup.
        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // Path rasterization never premultiplies, regardless of surface mode.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // All three parameter structs live in one buffer at distinct offsets.
        {
            let resources = self.resources();
            resources.queue.write_buffer(
                &resources.globals_buffer,
                0,
                bytemuck::bytes_of(&globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.path_globals_offset,
                bytemuck::bytes_of(&path_globals),
            );
            resources.queue.write_buffer(
                &resources.globals_buffer,
                self.gamma_offset,
                bytemuck::bytes_of(&gamma_params),
            );
        }

        loop {
            // Bump-allocator cursor into the instance buffer for this attempt.
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder =
                self.resources()
                    .device
                    .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                        label: Some("main_encoder"),
                    });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw helper returns false only on instance-buffer overflow.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths are rasterized offscreen first, which requires
                            // ending the main pass so the encoder is free.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            // Resume the main pass, loading (not clearing) what
                            // was already drawn.
                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Discard this attempt's commands, grow the buffer, and retry.
                drop(encoder);
                if self.instance_buffer_capacity >= self.max_buffer_size {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    // Present anyway so the swapchain does not stall.
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.resources()
                .queue
                .submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1273
1274 fn draw_quads(
1275 &self,
1276 quads: &[Quad],
1277 instance_offset: &mut u64,
1278 pass: &mut wgpu::RenderPass<'_>,
1279 ) -> bool {
1280 let data = unsafe { Self::instance_bytes(quads) };
1281 self.draw_instances(
1282 data,
1283 quads.len() as u32,
1284 &self.resources().pipelines.quads,
1285 instance_offset,
1286 pass,
1287 )
1288 }
1289
1290 fn draw_shadows(
1291 &self,
1292 shadows: &[Shadow],
1293 instance_offset: &mut u64,
1294 pass: &mut wgpu::RenderPass<'_>,
1295 ) -> bool {
1296 let data = unsafe { Self::instance_bytes(shadows) };
1297 self.draw_instances(
1298 data,
1299 shadows.len() as u32,
1300 &self.resources().pipelines.shadows,
1301 instance_offset,
1302 pass,
1303 )
1304 }
1305
1306 fn draw_underlines(
1307 &self,
1308 underlines: &[Underline],
1309 instance_offset: &mut u64,
1310 pass: &mut wgpu::RenderPass<'_>,
1311 ) -> bool {
1312 let data = unsafe { Self::instance_bytes(underlines) };
1313 self.draw_instances(
1314 data,
1315 underlines.len() as u32,
1316 &self.resources().pipelines.underlines,
1317 instance_offset,
1318 pass,
1319 )
1320 }
1321
1322 fn draw_monochrome_sprites(
1323 &self,
1324 sprites: &[MonochromeSprite],
1325 texture_id: AtlasTextureId,
1326 instance_offset: &mut u64,
1327 pass: &mut wgpu::RenderPass<'_>,
1328 ) -> bool {
1329 let tex_info = self.atlas.get_texture_info(texture_id);
1330 let data = unsafe { Self::instance_bytes(sprites) };
1331 self.draw_instances_with_texture(
1332 data,
1333 sprites.len() as u32,
1334 &tex_info.view,
1335 &self.resources().pipelines.mono_sprites,
1336 instance_offset,
1337 pass,
1338 )
1339 }
1340
1341 fn draw_subpixel_sprites(
1342 &self,
1343 sprites: &[SubpixelSprite],
1344 texture_id: AtlasTextureId,
1345 instance_offset: &mut u64,
1346 pass: &mut wgpu::RenderPass<'_>,
1347 ) -> bool {
1348 let tex_info = self.atlas.get_texture_info(texture_id);
1349 let data = unsafe { Self::instance_bytes(sprites) };
1350 let resources = self.resources();
1351 let pipeline = resources
1352 .pipelines
1353 .subpixel_sprites
1354 .as_ref()
1355 .unwrap_or(&resources.pipelines.mono_sprites);
1356 self.draw_instances_with_texture(
1357 data,
1358 sprites.len() as u32,
1359 &tex_info.view,
1360 pipeline,
1361 instance_offset,
1362 pass,
1363 )
1364 }
1365
1366 fn draw_polychrome_sprites(
1367 &self,
1368 sprites: &[PolychromeSprite],
1369 texture_id: AtlasTextureId,
1370 instance_offset: &mut u64,
1371 pass: &mut wgpu::RenderPass<'_>,
1372 ) -> bool {
1373 let tex_info = self.atlas.get_texture_info(texture_id);
1374 let data = unsafe { Self::instance_bytes(sprites) };
1375 self.draw_instances_with_texture(
1376 data,
1377 sprites.len() as u32,
1378 &tex_info.view,
1379 &self.resources().pipelines.poly_sprites,
1380 instance_offset,
1381 pass,
1382 )
1383 }
1384
1385 fn draw_instances(
1386 &self,
1387 data: &[u8],
1388 instance_count: u32,
1389 pipeline: &wgpu::RenderPipeline,
1390 instance_offset: &mut u64,
1391 pass: &mut wgpu::RenderPass<'_>,
1392 ) -> bool {
1393 if instance_count == 0 {
1394 return true;
1395 }
1396 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1397 return false;
1398 };
1399 let resources = self.resources();
1400 let bind_group = resources
1401 .device
1402 .create_bind_group(&wgpu::BindGroupDescriptor {
1403 label: None,
1404 layout: &resources.bind_group_layouts.instances,
1405 entries: &[wgpu::BindGroupEntry {
1406 binding: 0,
1407 resource: self.instance_binding(offset, size),
1408 }],
1409 });
1410 pass.set_pipeline(pipeline);
1411 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1412 pass.set_bind_group(1, &bind_group, &[]);
1413 pass.draw(0..4, 0..instance_count);
1414 true
1415 }
1416
1417 fn draw_instances_with_texture(
1418 &self,
1419 data: &[u8],
1420 instance_count: u32,
1421 texture_view: &wgpu::TextureView,
1422 pipeline: &wgpu::RenderPipeline,
1423 instance_offset: &mut u64,
1424 pass: &mut wgpu::RenderPass<'_>,
1425 ) -> bool {
1426 if instance_count == 0 {
1427 return true;
1428 }
1429 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1430 return false;
1431 };
1432 let resources = self.resources();
1433 let bind_group = resources
1434 .device
1435 .create_bind_group(&wgpu::BindGroupDescriptor {
1436 label: None,
1437 layout: &resources.bind_group_layouts.instances_with_texture,
1438 entries: &[
1439 wgpu::BindGroupEntry {
1440 binding: 0,
1441 resource: self.instance_binding(offset, size),
1442 },
1443 wgpu::BindGroupEntry {
1444 binding: 1,
1445 resource: wgpu::BindingResource::TextureView(texture_view),
1446 },
1447 wgpu::BindGroupEntry {
1448 binding: 2,
1449 resource: wgpu::BindingResource::Sampler(&resources.atlas_sampler),
1450 },
1451 ],
1452 });
1453 pass.set_pipeline(pipeline);
1454 pass.set_bind_group(0, &resources.globals_bind_group, &[]);
1455 pass.set_bind_group(1, &bind_group, &[]);
1456 pass.draw(0..4, 0..instance_count);
1457 true
1458 }
1459
1460 unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
1461 unsafe {
1462 std::slice::from_raw_parts(
1463 instances.as_ptr() as *const u8,
1464 std::mem::size_of_val(instances),
1465 )
1466 }
1467 }
1468
    /// Composites previously rasterized path coverage from the intermediate
    /// texture onto the current render pass.
    ///
    /// Returns `false` only when the instance buffer overflowed.
    fn draw_paths_from_intermediate(
        &self,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
        pass: &mut wgpu::RenderPass<'_>,
    ) -> bool {
        let first_path = &paths[0];
        // NOTE(review): comparing only the first and last `order` assumes the
        // batch is sorted by order — confirm against the scene batcher. Equal
        // ends => one sprite per path; otherwise a single sprite covering the
        // union of all clipped bounds.
        let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
        {
            paths
                .iter()
                .map(|p| PathSprite {
                    bounds: p.clipped_bounds(),
                })
                .collect()
        } else {
            let mut bounds = first_path.clipped_bounds();
            for path in paths.iter().skip(1) {
                bounds = bounds.union(&path.clipped_bounds());
            }
            vec![PathSprite { bounds }]
        };

        let resources = self.resources();
        // Without an intermediate texture there is nothing to composite; treat
        // this as success so the frame is not retried.
        let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
            return true;
        };

        let sprite_data = unsafe { Self::instance_bytes(&sprites) };
        self.draw_instances_with_texture(
            sprite_data,
            sprites.len() as u32,
            path_intermediate_view,
            &resources.pipelines.paths,
            instance_offset,
            pass,
        )
    }
1507
    /// Rasterizes `paths` into the offscreen intermediate texture using a
    /// dedicated render pass recorded on `encoder`.
    ///
    /// Returns `false` only when the instance buffer overflowed.
    fn draw_paths_to_intermediate(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
    ) -> bool {
        // Flatten every path into one vertex stream, tagging each vertex with
        // its path's color and clipped bounds.
        let mut vertices = Vec::new();
        for path in paths {
            let bounds = path.clipped_bounds();
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds,
            }));
        }

        if vertices.is_empty() {
            return true;
        }

        let vertex_data = unsafe { Self::instance_bytes(&vertices) };
        let Some((vertex_offset, vertex_size)) =
            self.write_to_instance_buffer(instance_offset, vertex_data)
        else {
            return false;
        };

        let resources = self.resources();
        let data_bind_group = resources
            .device
            .create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("path_rasterization_bind_group"),
                layout: &resources.bind_group_layouts.instances,
                entries: &[wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.instance_binding(vertex_offset, vertex_size),
                }],
            });

        // No intermediate target: nothing to rasterize into; report success so
        // the frame is not retried.
        let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
            return true;
        };

        // With MSAA available, draw into the multisampled texture and resolve
        // into the intermediate; otherwise draw into the intermediate directly.
        let (target_view, resolve_target) = if let Some(ref msaa_view) = resources.path_msaa_view {
            (msaa_view, Some(path_intermediate_view))
        } else {
            (path_intermediate_view, None)
        };

        {
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("path_rasterization_pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target_view,
                    resolve_target,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                        store: wgpu::StoreOp::Store,
                    },
                    depth_slice: None,
                })],
                depth_stencil_attachment: None,
                ..Default::default()
            });

            pass.set_pipeline(&resources.pipelines.path_rasterization);
            // Path globals use straight (non-premultiplied) alpha.
            pass.set_bind_group(0, &resources.path_globals_bind_group, &[]);
            pass.set_bind_group(1, &data_bind_group, &[]);
            // One non-instanced draw over the flattened vertex list.
            pass.draw(0..vertices.len() as u32, 0..1);
        }

        true
    }
1582
1583 fn grow_instance_buffer(&mut self) {
1584 let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
1585 log::info!("increased instance buffer size to {}", new_capacity);
1586 let resources = self.resources_mut();
1587 resources.instance_buffer = resources.device.create_buffer(&wgpu::BufferDescriptor {
1588 label: Some("instance_buffer"),
1589 size: new_capacity,
1590 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1591 mapped_at_creation: false,
1592 });
1593 self.instance_buffer_capacity = new_capacity;
1594 }
1595
1596 fn write_to_instance_buffer(
1597 &self,
1598 instance_offset: &mut u64,
1599 data: &[u8],
1600 ) -> Option<(u64, NonZeroU64)> {
1601 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1602 let size = (data.len() as u64).max(16);
1603 if offset + size > self.instance_buffer_capacity {
1604 return None;
1605 }
1606 let resources = self.resources();
1607 resources
1608 .queue
1609 .write_buffer(&resources.instance_buffer, offset, data);
1610 *instance_offset = offset + size;
1611 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1612 }
1613
1614 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1615 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1616 buffer: &self.resources().instance_buffer,
1617 offset,
1618 size: Some(size),
1619 })
1620 }
1621
1622 pub fn destroy(&mut self) {
1623 // Release surface-bound GPU resources eagerly so the underlying native
1624 // window can be destroyed before the renderer itself is dropped.
1625 self.resources.take();
1626 }
1627
1628 /// Returns true if the GPU device was lost and recovery is needed.
1629 pub fn device_lost(&self) -> bool {
1630 self.device_lost.load(std::sync::atomic::Ordering::SeqCst)
1631 }
1632
    /// Recovers from a lost GPU device by recreating the renderer with a new context.
    ///
    /// Call this after detecting `device_lost()` returns true.
    ///
    /// This method coordinates recovery across multiple windows:
    /// - The first window to call this will recreate the shared context
    /// - Subsequent windows will adopt the already-recovered context
    #[cfg(not(target_family = "wasm"))]
    pub fn recover<W>(&mut self, window: &W) -> anyhow::Result<()>
    where
        W: HasWindowHandle + HasDisplayHandle + std::fmt::Debug + Send + Sync + Clone + 'static,
    {
        let gpu_context = self.context.as_ref().expect("recover requires gpu_context");

        // Check if another window already recovered the context
        let needs_new_context = gpu_context
            .borrow()
            .as_ref()
            .is_none_or(|ctx| ctx.device_lost());

        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;

        let surface = if needs_new_context {
            log::warn!("GPU device lost, recreating context...");

            // Drop old resources to release Arc<Device>/Arc<Queue> and GPU resources
            self.resources = None;
            *gpu_context.borrow_mut() = None;

            // Wait for GPU driver to stabilize (350ms copied from windows :shrug:)
            std::thread::sleep(std::time::Duration::from_millis(350));

            let instance = WgpuContext::instance(Box::new(window.clone()));
            let surface = create_surface(&instance, window_handle.as_raw())?;
            let new_context = WgpuContext::new(instance, &surface, self.compositor_gpu)?;
            *gpu_context.borrow_mut() = Some(new_context);
            surface
        } else {
            // Another window already rebuilt the shared context; just create a
            // surface for this window on the existing instance.
            let ctx_ref = gpu_context.borrow();
            let instance = &ctx_ref.as_ref().unwrap().instance;
            create_surface(instance, window_handle.as_raw())?
        };

        // Preserve the current window size and transparency when rebuilding.
        let config = WgpuSurfaceConfig {
            size: gpui::Size {
                width: gpui::DevicePixels(self.surface_config.width as i32),
                height: gpui::DevicePixels(self.surface_config.height as i32),
            },
            transparent: self.surface_config.alpha_mode != wgpu::CompositeAlphaMode::Opaque,
        };
        // Re-borrow the (possibly freshly recreated) context for rebuilding.
        let gpu_context = Rc::clone(gpu_context);
        let ctx_ref = gpu_context.borrow();
        let context = ctx_ref.as_ref().expect("context should exist");

        // Drop this window's old resources and move the atlas onto the new
        // device/queue before reconstructing the renderer in place.
        self.resources = None;
        self.atlas
            .handle_device_lost(Arc::clone(&context.device), Arc::clone(&context.queue));

        *self = Self::new_internal(
            Some(gpu_context.clone()),
            context,
            surface,
            config,
            self.compositor_gpu,
            self.atlas.clone(),
        )?;

        log::info!("GPU recovery complete");
        Ok(())
    }
1705}
1706
/// Creates a wgpu surface for the given raw window handle.
#[cfg(not(target_family = "wasm"))]
fn create_surface(
    instance: &wgpu::Instance,
    raw_window_handle: raw_window_handle::RawWindowHandle,
) -> anyhow::Result<wgpu::Surface<'static>> {
    // SAFETY: `create_surface_unsafe` requires the raw window handle to remain
    // valid for the lifetime of the returned surface. NOTE(review): callers
    // are responsible for keeping the native window alive; not checked here.
    unsafe {
        instance
            .create_surface_unsafe(wgpu::SurfaceTargetUnsafe::RawHandle {
                // Fall back to the display handle already provided via InstanceDescriptor::display.
                raw_display_handle: None,
                raw_window_handle,
            })
            .map_err(|e| anyhow::anyhow!("{e}"))
    }
}
1722
/// Tunable rendering parameters resolved once at renderer startup.
struct RenderingParameters {
    // MSAA sample count used when rasterizing paths (4, 2, or 1).
    path_sample_count: u32,
    // Gamma-correction coefficients derived from the configured gamma value.
    gamma_ratios: [f32; 4],
    // Extra contrast applied to grayscale-antialiased text (>= 0).
    grayscale_enhanced_contrast: f32,
    // Extra contrast applied to subpixel-antialiased text (>= 0).
    subpixel_enhanced_contrast: f32,
}
1729
1730impl RenderingParameters {
1731 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1732 use std::env;
1733
1734 let format_features = adapter.get_texture_format_features(surface_format);
1735 let path_sample_count = [4, 2, 1]
1736 .into_iter()
1737 .find(|&n| format_features.flags.sample_count_supported(n))
1738 .unwrap_or(1);
1739
1740 let gamma = env::var("ZED_FONTS_GAMMA")
1741 .ok()
1742 .and_then(|v| v.parse().ok())
1743 .unwrap_or(1.8_f32)
1744 .clamp(1.0, 2.2);
1745 let gamma_ratios = get_gamma_correction_ratios(gamma);
1746
1747 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1748 .ok()
1749 .and_then(|v| v.parse().ok())
1750 .unwrap_or(1.0_f32)
1751 .max(0.0);
1752
1753 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1754 .ok()
1755 .and_then(|v| v.parse().ok())
1756 .unwrap_or(0.5_f32)
1757 .max(0.0);
1758
1759 Self {
1760 path_sample_count,
1761 gamma_ratios,
1762 grayscale_enhanced_contrast,
1763 subpixel_enhanced_contrast,
1764 }
1765 }
1766}