1#[cfg(not(target_family = "wasm"))]
2use crate::CompositorGpuHint;
3use crate::{WgpuAtlas, WgpuContext};
4use bytemuck::{Pod, Zeroable};
5use gpui::{
6 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
7 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
8 Underline, get_gamma_correction_ratios,
9};
10use log::warn;
11#[cfg(not(target_family = "wasm"))]
12use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
13use std::num::NonZeroU64;
14use std::sync::{Arc, Mutex};
15
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
/// Per-frame uniforms uploaded to the GPU at binding 0 of the globals bind
/// group. Field order and padding must match the corresponding WGSL struct
/// in shaders.wgsl — do not reorder.
struct GlobalParams {
    // Surface size in pixels, used by vertex shaders to map to clip space.
    viewport_size: [f32; 2],
    // 1 when the surface composites with premultiplied alpha, else 0.
    premultiplied_alpha: u32,
    // Explicit padding to keep the struct size a multiple of 16 bytes
    // (uniform-buffer layout requirement) — TODO confirm against shader.
    pad: u32,
}
23
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
/// GPU-uploadable rectangle: origin and size flattened to plain f32 pairs.
/// Must match the WGSL-side bounds layout — do not reorder fields.
struct PodBounds {
    origin: [f32; 2],
    size: [f32; 2],
}
30
31impl From<Bounds<ScaledPixels>> for PodBounds {
32 fn from(bounds: Bounds<ScaledPixels>) -> Self {
33 Self {
34 origin: [bounds.origin.x.0, bounds.origin.y.0],
35 size: [bounds.size.width.0, bounds.size.height.0],
36 }
37 }
38}
39
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
/// Per-surface uniforms for the (currently unused on this backend) surface
/// pipeline: where to draw the surface and the clip rectangle to apply.
struct SurfaceParams {
    bounds: PodBounds,
    content_mask: PodBounds,
}
46
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
/// Text-rendering gamma/contrast uniforms uploaded at binding 1 of the
/// globals bind group. Layout must match the WGSL counterpart.
struct GammaParams {
    // Ratios produced by gpui's get_gamma_correction_ratios().
    gamma_ratios: [f32; 4],
    grayscale_enhanced_contrast: f32,
    subpixel_enhanced_contrast: f32,
    // Pads the struct to a 16-byte multiple for uniform-buffer layout.
    _pad: [f32; 2],
}
55
#[derive(Clone, Debug)]
#[repr(C)]
/// Instance data for compositing a rasterized path from the intermediate
/// texture onto the frame: just the screen-space bounds to sample.
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
61
#[derive(Clone, Debug)]
#[repr(C)]
/// Vertex format for the path-rasterization pass that fills paths into the
/// intermediate texture before they are composited onto the frame.
struct PathRasterizationVertex {
    // Screen-space position of the vertex.
    xy_position: Point<ScaledPixels>,
    // Curve-space coordinate used by the fragment shader for coverage —
    // presumably a loop-blinn style st parameterization; confirm in shader.
    st_position: Point<f32>,
    color: Background,
    // Bounds of the whole path, used for clipping/gradient evaluation —
    // TODO confirm exact use against fs_path_rasterization.
    bounds: Bounds<ScaledPixels>,
}
70
/// Initial configuration for the renderer's swapchain surface.
pub struct WgpuSurfaceConfig {
    /// Drawable size in device pixels.
    pub size: Size<DevicePixels>,
    /// Whether the window requests a transparent (premultiplied-alpha) surface.
    pub transparent: bool,
}
75
/// One render pipeline per primitive kind drawn by the scene renderer.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    // Fills paths into the intermediate texture (possibly multisampled).
    path_rasterization: wgpu::RenderPipeline,
    // Composites rasterized paths from the intermediate texture to the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    // Present only when the adapter supports dual-source blending.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
88
/// Bind group layouts shared across all pipelines.
struct WgpuBindGroupLayouts {
    // Group 0: GlobalParams (binding 0) + GammaParams (binding 1).
    globals: wgpu::BindGroupLayout,
    // Group 1: read-only storage buffer of instance data.
    instances: wgpu::BindGroupLayout,
    // Group 1: instance storage buffer + sampled texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    // Group 1 for the surface pipeline (uniforms + two textures + sampler).
    surfaces: wgpu::BindGroupLayout,
}
95
/// wgpu-backed renderer for a single window: owns the swapchain surface,
/// all pipelines, the sprite atlas, and the per-frame upload buffers.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    // Single uniform buffer holding GlobalParams, path GlobalParams, and
    // GammaParams at the aligned offsets below.
    globals_buffer: wgpu::Buffer,
    path_globals_offset: u64,
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    // Growable storage buffer that all per-primitive instance data for a
    // frame is packed into.
    instance_buffer: wgpu::Buffer,
    instance_buffer_capacity: u64,
    max_buffer_size: u64,
    storage_buffer_alignment: u64,
    // Offscreen targets for path rasterization; created lazily once the
    // surface is known to be healthy (see ensure_intermediate_textures).
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    // Alpha modes chosen at startup for the transparent/opaque cases, so
    // update_transparency can switch without re-querying capabilities.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    // Most recent uncaptured device error, recorded by the callback
    // installed in new_with_surface and consumed at the start of draw().
    last_error: Arc<Mutex<Option<String>>>,
    // Consecutive frames that observed a GPU error; draw() panics past 20.
    failed_frame_count: u32,
}
127
128impl WgpuRenderer {
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// On first call this also initializes the shared `WgpuContext` (adapter,
    /// device, queue) and stores it in `gpu_context` for reuse by later
    /// windows; subsequent calls validate that the existing context is
    /// compatible with the new surface.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    #[cfg(not(target_family = "wasm"))]
    pub fn new<W: HasWindowHandle + HasDisplayHandle>(
        gpu_context: &mut Option<WgpuContext>,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self> {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
        let display_handle = window
            .display_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            raw_display_handle: display_handle.as_raw(),
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        let instance = gpu_context
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(WgpuContext::instance);

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        let context = match gpu_context {
            Some(context) => {
                // Reuse the shared context, but only if its adapter can
                // actually present to this new surface.
                context.check_compatible_with_surface(&surface)?;
                context
            }
            // First window: build the context against this surface (the
            // surface constrains adapter selection) and cache it.
            None => gpu_context.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        Self::new_with_surface(context, surface, config)
    }
180
181 #[cfg(target_family = "wasm")]
182 pub fn new_from_canvas(
183 context: &WgpuContext,
184 canvas: &web_sys::HtmlCanvasElement,
185 config: WgpuSurfaceConfig,
186 ) -> anyhow::Result<Self> {
187 let surface = context
188 .instance
189 .create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))
190 .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?;
191 Self::new_with_surface(context, surface, config)
192 }
193
    /// Shared constructor: configures the surface, builds all pipelines,
    /// bind groups, and upload buffers, and installs the uncaptured-error
    /// handler. Called by both the native and wasm entry points.
    fn new_with_surface(
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer non-sRGB 8-bit formats; fall back to any non-sRGB format,
        // then to whatever the surface offers first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first preferred alpha mode the surface supports, falling
        // back to whatever the surface reports first.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Resolve both modes up front so update_transparency() can switch
        // later without re-querying surface capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Surface textures cannot exceed the device's 2D texture limit.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            // Zero-sized surfaces are invalid; clamp up to 1x1.
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Pack three uniform structs into one buffer: GlobalParams at 0,
        // a second GlobalParams for path rasterization, then GammaParams.
        // Each offset must satisfy the device's uniform-offset alignment.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let max_buffer_size = device.limits().max_buffer_size;
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        // 2 MiB initial instance storage; grown on demand during draw.
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Main globals bind group: GlobalParams at offset 0 + GammaParams.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        // Same layout but pointing at the path-rasterization GlobalParams,
        // which draw() writes with premultiplied_alpha forced to 0.
        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured device errors; draw() inspects this each frame
        // and panics after too many consecutive failures.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        Ok(Self {
            device,
            queue,
            surface,
            surface_config,
            pipelines,
            bind_group_layouts,
            atlas,
            atlas_sampler,
            globals_buffer,
            path_globals_offset,
            gamma_offset,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            max_buffer_size,
            storage_buffer_alignment,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
        })
    }
412
    /// Builds the four bind group layouts shared by all pipelines. Binding
    /// numbers and types here must match the WGSL declarations in shaders.wgsl.
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        // Group 0 everywhere: GlobalParams (binding 0, vertex+fragment) and
        // GammaParams (binding 1, fragment only).
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer of per-primitive instance data; the
        // concrete element type varies per pipeline, so no min size.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // Instances plus a sampled texture + sampler, used by the sprite and
        // path-compositing pipelines.
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // Surface pipeline: SurfaceParams uniform + two textures (presumably
        // separate luma/chroma planes for video — confirm against shader) +
        // a shared sampler.
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
536
    /// Compiles the shaders and builds one render pipeline per primitive
    /// kind. Rebuilt whenever the alpha mode changes (see update_transparency),
    /// since the default blend state depends on it.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let base_shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(base_shader_source)),
        });

        // Subpixel text needs dual-source blending; its shader is compiled as
        // a separate module with the WGSL extension enabled, prepended to the
        // base source so it can reuse the shared declarations.
        let subpixel_shader_source = include_str!("shaders_subpixel.wgsl");
        let subpixel_shader_module = if dual_source_blending {
            let combined = format!(
                "enable dual_source_blending;\n{base_shader_source}\n{subpixel_shader_source}"
            );
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("gpui_subpixel_shaders"),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Owned(combined)),
            }))
        } else {
            None
        };

        // Default blend state for most pipelines follows the surface's
        // compositing mode.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // All pipelines share the same shape: no vertex buffers (instance
        // data comes from the storage buffer), no depth/stencil, filled
        // triangles with no culling.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32,
                               module: &wgpu::ShaderModule| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Renders into the intermediate texture (always premultiplied,
        // possibly multisampled), not the surface-dependent blend mode.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
            &shader_module,
        );

        // Composites premultiplied path color over the frame; alpha channel
        // accumulates additively — NOTE(review): confirm intent vs. shader.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
            &shader_module,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Subpixel text: per-channel coverage via dual-source blending
        // (Src1 factors). Only the color channels are written.
        let subpixel_sprites = if let Some(subpixel_module) = &subpixel_shader_module {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
                subpixel_module,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
            &shader_module,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
789
790 fn create_path_intermediate(
791 device: &wgpu::Device,
792 format: wgpu::TextureFormat,
793 width: u32,
794 height: u32,
795 ) -> (wgpu::Texture, wgpu::TextureView) {
796 let texture = device.create_texture(&wgpu::TextureDescriptor {
797 label: Some("path_intermediate"),
798 size: wgpu::Extent3d {
799 width: width.max(1),
800 height: height.max(1),
801 depth_or_array_layers: 1,
802 },
803 mip_level_count: 1,
804 sample_count: 1,
805 dimension: wgpu::TextureDimension::D2,
806 format,
807 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
808 view_formats: &[],
809 });
810 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
811 (texture, view)
812 }
813
814 fn create_msaa_if_needed(
815 device: &wgpu::Device,
816 format: wgpu::TextureFormat,
817 width: u32,
818 height: u32,
819 sample_count: u32,
820 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
821 if sample_count <= 1 {
822 return None;
823 }
824 let texture = device.create_texture(&wgpu::TextureDescriptor {
825 label: Some("path_msaa"),
826 size: wgpu::Extent3d {
827 width: width.max(1),
828 height: height.max(1),
829 depth_or_array_layers: 1,
830 },
831 mip_level_count: 1,
832 sample_count,
833 dimension: wgpu::TextureDimension::D2,
834 format,
835 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
836 view_formats: &[],
837 });
838 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
839 Some((texture, view))
840 }
841
842 pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
843 let width = size.width.0 as u32;
844 let height = size.height.0 as u32;
845
846 if width != self.surface_config.width || height != self.surface_config.height {
847 let clamped_width = width.min(self.max_texture_size);
848 let clamped_height = height.min(self.max_texture_size);
849
850 if clamped_width != width || clamped_height != height {
851 warn!(
852 "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
853 Clamping to ({}, {}). Window content may not fill the entire window.",
854 width, height, self.max_texture_size, clamped_width, clamped_height
855 );
856 }
857
858 // Wait for any in-flight GPU work to complete before destroying textures
859 if let Err(e) = self.device.poll(wgpu::PollType::Wait {
860 submission_index: None,
861 timeout: None,
862 }) {
863 warn!("Failed to poll device during resize: {e:?}");
864 }
865
866 // Destroy old textures before allocating new ones to avoid GPU memory spikes
867 if let Some(ref texture) = self.path_intermediate_texture {
868 texture.destroy();
869 }
870 if let Some(ref texture) = self.path_msaa_texture {
871 texture.destroy();
872 }
873
874 self.surface_config.width = clamped_width.max(1);
875 self.surface_config.height = clamped_height.max(1);
876 self.surface.configure(&self.device, &self.surface_config);
877
878 // Invalidate intermediate textures - they will be lazily recreated
879 // in draw() after we confirm the surface is healthy. This avoids
880 // panics when the device/surface is in an invalid state during resize.
881 self.path_intermediate_texture = None;
882 self.path_intermediate_view = None;
883 self.path_msaa_texture = None;
884 self.path_msaa_view = None;
885 }
886 }
887
888 fn ensure_intermediate_textures(&mut self) {
889 if self.path_intermediate_texture.is_some() {
890 return;
891 }
892
893 let (path_intermediate_texture, path_intermediate_view) = {
894 let (t, v) = Self::create_path_intermediate(
895 &self.device,
896 self.surface_config.format,
897 self.surface_config.width,
898 self.surface_config.height,
899 );
900 (Some(t), Some(v))
901 };
902 self.path_intermediate_texture = path_intermediate_texture;
903 self.path_intermediate_view = path_intermediate_view;
904
905 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
906 &self.device,
907 self.surface_config.format,
908 self.surface_config.width,
909 self.surface_config.height,
910 self.rendering_params.path_sample_count,
911 )
912 .map(|(t, v)| (Some(t), Some(v)))
913 .unwrap_or((None, None));
914 self.path_msaa_texture = path_msaa_texture;
915 self.path_msaa_view = path_msaa_view;
916 }
917
918 pub fn update_transparency(&mut self, transparent: bool) {
919 let new_alpha_mode = if transparent {
920 self.transparent_alpha_mode
921 } else {
922 self.opaque_alpha_mode
923 };
924
925 if new_alpha_mode != self.surface_config.alpha_mode {
926 self.surface_config.alpha_mode = new_alpha_mode;
927 self.surface.configure(&self.device, &self.surface_config);
928 self.pipelines = Self::create_pipelines(
929 &self.device,
930 &self.bind_group_layouts,
931 self.surface_config.format,
932 self.surface_config.alpha_mode,
933 self.rendering_params.path_sample_count,
934 self.dual_source_blending,
935 );
936 }
937 }
938
939 #[allow(dead_code)]
940 pub fn viewport_size(&self) -> Size<DevicePixels> {
941 Size {
942 width: DevicePixels(self.surface_config.width as i32),
943 height: DevicePixels(self.surface_config.height as i32),
944 }
945 }
946
947 pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
948 &self.atlas
949 }
950
951 pub fn supports_dual_source_blending(&self) -> bool {
952 self.dual_source_blending
953 }
954
955 pub fn gpu_specs(&self) -> GpuSpecs {
956 GpuSpecs {
957 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
958 device_name: self.adapter_info.name.clone(),
959 driver_name: self.adapter_info.driver.clone(),
960 driver_info: self.adapter_info.driver_info.clone(),
961 }
962 }
963
964 pub fn max_texture_size(&self) -> u32 {
965 self.max_texture_size
966 }
967
968 pub fn draw(&mut self, scene: &Scene) {
969 let last_error = self.last_error.lock().unwrap().take();
970 if let Some(error) = last_error {
971 self.failed_frame_count += 1;
972 log::error!(
973 "GPU error during frame (failure {} of 20): {error}",
974 self.failed_frame_count
975 );
976 if self.failed_frame_count > 20 {
977 panic!("Too many consecutive GPU errors. Last error: {error}");
978 }
979 } else {
980 self.failed_frame_count = 0;
981 }
982
983 self.atlas.before_frame();
984
985 let frame = match self.surface.get_current_texture() {
986 Ok(frame) => frame,
987 Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
988 self.surface.configure(&self.device, &self.surface_config);
989 return;
990 }
991 Err(e) => {
992 log::error!("Failed to acquire surface texture: {e}");
993 return;
994 }
995 };
996
997 // Now that we know the surface is healthy, ensure intermediate textures exist
998 self.ensure_intermediate_textures();
999
1000 let frame_view = frame
1001 .texture
1002 .create_view(&wgpu::TextureViewDescriptor::default());
1003
1004 let gamma_params = GammaParams {
1005 gamma_ratios: self.rendering_params.gamma_ratios,
1006 grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
1007 subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
1008 _pad: [0.0; 2],
1009 };
1010
1011 let globals = GlobalParams {
1012 viewport_size: [
1013 self.surface_config.width as f32,
1014 self.surface_config.height as f32,
1015 ],
1016 premultiplied_alpha: if self.surface_config.alpha_mode
1017 == wgpu::CompositeAlphaMode::PreMultiplied
1018 {
1019 1
1020 } else {
1021 0
1022 },
1023 pad: 0,
1024 };
1025
1026 let path_globals = GlobalParams {
1027 premultiplied_alpha: 0,
1028 ..globals
1029 };
1030
1031 self.queue
1032 .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
1033 self.queue.write_buffer(
1034 &self.globals_buffer,
1035 self.path_globals_offset,
1036 bytemuck::bytes_of(&path_globals),
1037 );
1038 self.queue.write_buffer(
1039 &self.globals_buffer,
1040 self.gamma_offset,
1041 bytemuck::bytes_of(&gamma_params),
1042 );
1043
1044 loop {
1045 let mut instance_offset: u64 = 0;
1046 let mut overflow = false;
1047
1048 let mut encoder = self
1049 .device
1050 .create_command_encoder(&wgpu::CommandEncoderDescriptor {
1051 label: Some("main_encoder"),
1052 });
1053
1054 {
1055 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1056 label: Some("main_pass"),
1057 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1058 view: &frame_view,
1059 resolve_target: None,
1060 ops: wgpu::Operations {
1061 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1062 store: wgpu::StoreOp::Store,
1063 },
1064 depth_slice: None,
1065 })],
1066 depth_stencil_attachment: None,
1067 ..Default::default()
1068 });
1069
1070 for batch in scene.batches() {
1071 let ok = match batch {
1072 PrimitiveBatch::Quads(range) => {
1073 self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
1074 }
1075 PrimitiveBatch::Shadows(range) => self.draw_shadows(
1076 &scene.shadows[range],
1077 &mut instance_offset,
1078 &mut pass,
1079 ),
1080 PrimitiveBatch::Paths(range) => {
1081 let paths = &scene.paths[range];
1082 if paths.is_empty() {
1083 continue;
1084 }
1085
1086 drop(pass);
1087
1088 let did_draw = self.draw_paths_to_intermediate(
1089 &mut encoder,
1090 paths,
1091 &mut instance_offset,
1092 );
1093
1094 pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1095 label: Some("main_pass_continued"),
1096 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1097 view: &frame_view,
1098 resolve_target: None,
1099 ops: wgpu::Operations {
1100 load: wgpu::LoadOp::Load,
1101 store: wgpu::StoreOp::Store,
1102 },
1103 depth_slice: None,
1104 })],
1105 depth_stencil_attachment: None,
1106 ..Default::default()
1107 });
1108
1109 if did_draw {
1110 self.draw_paths_from_intermediate(
1111 paths,
1112 &mut instance_offset,
1113 &mut pass,
1114 )
1115 } else {
1116 false
1117 }
1118 }
1119 PrimitiveBatch::Underlines(range) => self.draw_underlines(
1120 &scene.underlines[range],
1121 &mut instance_offset,
1122 &mut pass,
1123 ),
1124 PrimitiveBatch::MonochromeSprites { texture_id, range } => self
1125 .draw_monochrome_sprites(
1126 &scene.monochrome_sprites[range],
1127 texture_id,
1128 &mut instance_offset,
1129 &mut pass,
1130 ),
1131 PrimitiveBatch::SubpixelSprites { texture_id, range } => self
1132 .draw_subpixel_sprites(
1133 &scene.subpixel_sprites[range],
1134 texture_id,
1135 &mut instance_offset,
1136 &mut pass,
1137 ),
1138 PrimitiveBatch::PolychromeSprites { texture_id, range } => self
1139 .draw_polychrome_sprites(
1140 &scene.polychrome_sprites[range],
1141 texture_id,
1142 &mut instance_offset,
1143 &mut pass,
1144 ),
1145 PrimitiveBatch::Surfaces(_surfaces) => {
1146 // Surfaces are macOS-only for video playback
1147 // Not implemented for Linux/wgpu
1148 true
1149 }
1150 };
1151 if !ok {
1152 overflow = true;
1153 break;
1154 }
1155 }
1156 }
1157
1158 if overflow {
1159 drop(encoder);
1160 if self.instance_buffer_capacity >= self.max_buffer_size {
1161 log::error!(
1162 "instance buffer size grew too large: {}",
1163 self.instance_buffer_capacity
1164 );
1165 frame.present();
1166 return;
1167 }
1168 self.grow_instance_buffer();
1169 continue;
1170 }
1171
1172 self.queue.submit(std::iter::once(encoder.finish()));
1173 frame.present();
1174 return;
1175 }
1176 }
1177
1178 fn draw_quads(
1179 &self,
1180 quads: &[Quad],
1181 instance_offset: &mut u64,
1182 pass: &mut wgpu::RenderPass<'_>,
1183 ) -> bool {
1184 let data = unsafe { Self::instance_bytes(quads) };
1185 self.draw_instances(
1186 data,
1187 quads.len() as u32,
1188 &self.pipelines.quads,
1189 instance_offset,
1190 pass,
1191 )
1192 }
1193
1194 fn draw_shadows(
1195 &self,
1196 shadows: &[Shadow],
1197 instance_offset: &mut u64,
1198 pass: &mut wgpu::RenderPass<'_>,
1199 ) -> bool {
1200 let data = unsafe { Self::instance_bytes(shadows) };
1201 self.draw_instances(
1202 data,
1203 shadows.len() as u32,
1204 &self.pipelines.shadows,
1205 instance_offset,
1206 pass,
1207 )
1208 }
1209
1210 fn draw_underlines(
1211 &self,
1212 underlines: &[Underline],
1213 instance_offset: &mut u64,
1214 pass: &mut wgpu::RenderPass<'_>,
1215 ) -> bool {
1216 let data = unsafe { Self::instance_bytes(underlines) };
1217 self.draw_instances(
1218 data,
1219 underlines.len() as u32,
1220 &self.pipelines.underlines,
1221 instance_offset,
1222 pass,
1223 )
1224 }
1225
1226 fn draw_monochrome_sprites(
1227 &self,
1228 sprites: &[MonochromeSprite],
1229 texture_id: AtlasTextureId,
1230 instance_offset: &mut u64,
1231 pass: &mut wgpu::RenderPass<'_>,
1232 ) -> bool {
1233 let tex_info = self.atlas.get_texture_info(texture_id);
1234 let data = unsafe { Self::instance_bytes(sprites) };
1235 self.draw_instances_with_texture(
1236 data,
1237 sprites.len() as u32,
1238 &tex_info.view,
1239 &self.pipelines.mono_sprites,
1240 instance_offset,
1241 pass,
1242 )
1243 }
1244
1245 fn draw_subpixel_sprites(
1246 &self,
1247 sprites: &[SubpixelSprite],
1248 texture_id: AtlasTextureId,
1249 instance_offset: &mut u64,
1250 pass: &mut wgpu::RenderPass<'_>,
1251 ) -> bool {
1252 let tex_info = self.atlas.get_texture_info(texture_id);
1253 let data = unsafe { Self::instance_bytes(sprites) };
1254 let pipeline = self
1255 .pipelines
1256 .subpixel_sprites
1257 .as_ref()
1258 .unwrap_or(&self.pipelines.mono_sprites);
1259 self.draw_instances_with_texture(
1260 data,
1261 sprites.len() as u32,
1262 &tex_info.view,
1263 pipeline,
1264 instance_offset,
1265 pass,
1266 )
1267 }
1268
1269 fn draw_polychrome_sprites(
1270 &self,
1271 sprites: &[PolychromeSprite],
1272 texture_id: AtlasTextureId,
1273 instance_offset: &mut u64,
1274 pass: &mut wgpu::RenderPass<'_>,
1275 ) -> bool {
1276 let tex_info = self.atlas.get_texture_info(texture_id);
1277 let data = unsafe { Self::instance_bytes(sprites) };
1278 self.draw_instances_with_texture(
1279 data,
1280 sprites.len() as u32,
1281 &tex_info.view,
1282 &self.pipelines.poly_sprites,
1283 instance_offset,
1284 pass,
1285 )
1286 }
1287
1288 fn draw_instances(
1289 &self,
1290 data: &[u8],
1291 instance_count: u32,
1292 pipeline: &wgpu::RenderPipeline,
1293 instance_offset: &mut u64,
1294 pass: &mut wgpu::RenderPass<'_>,
1295 ) -> bool {
1296 if instance_count == 0 {
1297 return true;
1298 }
1299 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1300 return false;
1301 };
1302 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1303 label: None,
1304 layout: &self.bind_group_layouts.instances,
1305 entries: &[wgpu::BindGroupEntry {
1306 binding: 0,
1307 resource: self.instance_binding(offset, size),
1308 }],
1309 });
1310 pass.set_pipeline(pipeline);
1311 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1312 pass.set_bind_group(1, &bind_group, &[]);
1313 pass.draw(0..4, 0..instance_count);
1314 true
1315 }
1316
1317 fn draw_instances_with_texture(
1318 &self,
1319 data: &[u8],
1320 instance_count: u32,
1321 texture_view: &wgpu::TextureView,
1322 pipeline: &wgpu::RenderPipeline,
1323 instance_offset: &mut u64,
1324 pass: &mut wgpu::RenderPass<'_>,
1325 ) -> bool {
1326 if instance_count == 0 {
1327 return true;
1328 }
1329 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1330 return false;
1331 };
1332 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1333 label: None,
1334 layout: &self.bind_group_layouts.instances_with_texture,
1335 entries: &[
1336 wgpu::BindGroupEntry {
1337 binding: 0,
1338 resource: self.instance_binding(offset, size),
1339 },
1340 wgpu::BindGroupEntry {
1341 binding: 1,
1342 resource: wgpu::BindingResource::TextureView(texture_view),
1343 },
1344 wgpu::BindGroupEntry {
1345 binding: 2,
1346 resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1347 },
1348 ],
1349 });
1350 pass.set_pipeline(pipeline);
1351 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1352 pass.set_bind_group(1, &bind_group, &[]);
1353 pass.draw(0..4, 0..instance_count);
1354 true
1355 }
1356
1357 unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
1358 unsafe {
1359 std::slice::from_raw_parts(
1360 instances.as_ptr() as *const u8,
1361 std::mem::size_of_val(instances),
1362 )
1363 }
1364 }
1365
1366 fn draw_paths_from_intermediate(
1367 &self,
1368 paths: &[Path<ScaledPixels>],
1369 instance_offset: &mut u64,
1370 pass: &mut wgpu::RenderPass<'_>,
1371 ) -> bool {
1372 let first_path = &paths[0];
1373 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1374 {
1375 paths
1376 .iter()
1377 .map(|p| PathSprite {
1378 bounds: p.clipped_bounds(),
1379 })
1380 .collect()
1381 } else {
1382 let mut bounds = first_path.clipped_bounds();
1383 for path in paths.iter().skip(1) {
1384 bounds = bounds.union(&path.clipped_bounds());
1385 }
1386 vec![PathSprite { bounds }]
1387 };
1388
1389 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1390 return true;
1391 };
1392
1393 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1394 self.draw_instances_with_texture(
1395 sprite_data,
1396 sprites.len() as u32,
1397 path_intermediate_view,
1398 &self.pipelines.paths,
1399 instance_offset,
1400 pass,
1401 )
1402 }
1403
1404 fn draw_paths_to_intermediate(
1405 &self,
1406 encoder: &mut wgpu::CommandEncoder,
1407 paths: &[Path<ScaledPixels>],
1408 instance_offset: &mut u64,
1409 ) -> bool {
1410 let mut vertices = Vec::new();
1411 for path in paths {
1412 let bounds = path.clipped_bounds();
1413 vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
1414 xy_position: v.xy_position,
1415 st_position: v.st_position,
1416 color: path.color,
1417 bounds,
1418 }));
1419 }
1420
1421 if vertices.is_empty() {
1422 return true;
1423 }
1424
1425 let vertex_data = unsafe { Self::instance_bytes(&vertices) };
1426 let Some((vertex_offset, vertex_size)) =
1427 self.write_to_instance_buffer(instance_offset, vertex_data)
1428 else {
1429 return false;
1430 };
1431
1432 let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1433 label: Some("path_rasterization_bind_group"),
1434 layout: &self.bind_group_layouts.instances,
1435 entries: &[wgpu::BindGroupEntry {
1436 binding: 0,
1437 resource: self.instance_binding(vertex_offset, vertex_size),
1438 }],
1439 });
1440
1441 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1442 return true;
1443 };
1444
1445 let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
1446 (msaa_view, Some(path_intermediate_view))
1447 } else {
1448 (path_intermediate_view, None)
1449 };
1450
1451 {
1452 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1453 label: Some("path_rasterization_pass"),
1454 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1455 view: target_view,
1456 resolve_target,
1457 ops: wgpu::Operations {
1458 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1459 store: wgpu::StoreOp::Store,
1460 },
1461 depth_slice: None,
1462 })],
1463 depth_stencil_attachment: None,
1464 ..Default::default()
1465 });
1466
1467 pass.set_pipeline(&self.pipelines.path_rasterization);
1468 pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
1469 pass.set_bind_group(1, &data_bind_group, &[]);
1470 pass.draw(0..vertices.len() as u32, 0..1);
1471 }
1472
1473 true
1474 }
1475
1476 fn grow_instance_buffer(&mut self) {
1477 let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
1478 log::info!("increased instance buffer size to {}", new_capacity);
1479 self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1480 label: Some("instance_buffer"),
1481 size: new_capacity,
1482 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1483 mapped_at_creation: false,
1484 });
1485 self.instance_buffer_capacity = new_capacity;
1486 }
1487
1488 fn write_to_instance_buffer(
1489 &self,
1490 instance_offset: &mut u64,
1491 data: &[u8],
1492 ) -> Option<(u64, NonZeroU64)> {
1493 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1494 let size = (data.len() as u64).max(16);
1495 if offset + size > self.instance_buffer_capacity {
1496 return None;
1497 }
1498 self.queue.write_buffer(&self.instance_buffer, offset, data);
1499 *instance_offset = offset + size;
1500 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1501 }
1502
1503 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1504 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1505 buffer: &self.instance_buffer,
1506 offset,
1507 size: Some(size),
1508 })
1509 }
1510
    /// Releases renderer resources. Intentionally a no-op: wgpu objects are
    /// reference-counted and freed when this renderer is dropped.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1514}
1515
/// Rendering quality settings resolved once at renderer creation, partly
/// from `ZED_FONTS_*` environment variables (see [`RenderingParameters::new`]).
struct RenderingParameters {
    // MSAA sample count for path rasterization: the highest of 4/2/1 that
    // the surface format supports (1 means no multisampling).
    path_sample_count: u32,
    // Precomputed gamma-correction coefficients for text rendering.
    gamma_ratios: [f32; 4],
    // Extra contrast for grayscale-antialiased text; clamped to >= 0.
    grayscale_enhanced_contrast: f32,
    // Extra contrast for subpixel-antialiased text; clamped to >= 0.
    subpixel_enhanced_contrast: f32,
}
1522
1523impl RenderingParameters {
1524 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1525 use std::env;
1526
1527 let format_features = adapter.get_texture_format_features(surface_format);
1528 let path_sample_count = [4, 2, 1]
1529 .into_iter()
1530 .find(|&n| format_features.flags.sample_count_supported(n))
1531 .unwrap_or(1);
1532
1533 let gamma = env::var("ZED_FONTS_GAMMA")
1534 .ok()
1535 .and_then(|v| v.parse().ok())
1536 .unwrap_or(1.8_f32)
1537 .clamp(1.0, 2.2);
1538 let gamma_ratios = get_gamma_correction_ratios(gamma);
1539
1540 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1541 .ok()
1542 .and_then(|v| v.parse().ok())
1543 .unwrap_or(1.0_f32)
1544 .max(0.0);
1545
1546 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1547 .ok()
1548 .and_then(|v| v.parse().ok())
1549 .unwrap_or(0.5_f32)
1550 .max(0.0);
1551
1552 Self {
1553 path_sample_count,
1554 gamma_ratios,
1555 grayscale_enhanced_contrast,
1556 subpixel_enhanced_contrast,
1557 }
1558 }
1559}