1use super::{CompositorGpuHint, WgpuAtlas, WgpuContext};
2use crate::{
3 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
4 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
5 Underline, get_gamma_correction_ratios,
6};
7use bytemuck::{Pod, Zeroable};
8use log::warn;
9use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
10use std::num::NonZeroU64;
11use std::sync::{Arc, Mutex};
12
/// Per-frame uniform data shared by every pipeline.
///
/// Written once per frame in `draw()`; presumably mirrors a matching uniform
/// struct in `shaders.wgsl` — confirm field order against the shader.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    // Surface size in pixels (surface_config width/height).
    viewport_size: [f32; 2],
    // 1 when the surface alpha mode is PreMultiplied, else 0.
    premultiplied_alpha: u32,
    // Explicit padding so the struct size is a 16-byte multiple.
    pad: u32,
}
20
/// Plain-old-data rectangle (origin + size in f32) suitable for GPU upload.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    // Top-left corner: [x, y].
    origin: [f32; 2],
    // Extent: [width, height].
    size: [f32; 2],
}
27
28impl From<Bounds<ScaledPixels>> for PodBounds {
29 fn from(bounds: Bounds<ScaledPixels>) -> Self {
30 Self {
31 origin: [bounds.origin.x.0, bounds.origin.y.0],
32 size: [bounds.size.width.0, bounds.size.height.0],
33 }
34 }
35}
36
/// Uniform data for the (currently unused on this platform) surface pipeline.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    // Where on screen to draw the surface.
    bounds: PodBounds,
    // Clip rectangle applied to the surface.
    content_mask: PodBounds,
}
43
/// Gamma/contrast parameters for text rendering, filled from
/// `RenderingParameters` each frame in `draw()`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    // Ratios produced by `get_gamma_correction_ratios` — see that helper.
    gamma_ratios: [f32; 4],
    // Extra contrast applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    // Extra contrast applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    // Padding to a 16-byte multiple.
    _pad: [f32; 2],
}
52
/// Instance data for compositing a rasterized path from the intermediate
/// texture onto the frame (used by code beyond this chunk — see draw_paths_*).
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
58
/// Vertex emitted when rasterizing a path into the intermediate texture.
///
/// NOTE(review): `#[repr(C)]` but not `Pod`/`Zeroable`; presumably uploaded
/// via the same unsafe byte-cast path as instances — verify alignment/padding.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    // Position in scaled pixel coordinates.
    xy_position: Point<ScaledPixels>,
    // Position in the path's local parametric space.
    st_position: Point<f32>,
    // Fill for this path.
    color: Background,
    // Bounding box of the whole path.
    bounds: Bounds<ScaledPixels>,
}
67
/// Caller-requested initial surface configuration.
pub struct WgpuSurfaceConfig {
    /// Requested drawable size in device pixels (clamped to device limits).
    pub size: Size<DevicePixels>,
    /// Whether the window should composite with transparency.
    pub transparent: bool,
}
72
/// One render pipeline per primitive kind, built in `create_pipelines`.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    // Renders path geometry into the intermediate (possibly MSAA) texture.
    path_rasterization: wgpu::RenderPipeline,
    // Composites the rasterized paths from the intermediate onto the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    // None when the adapter lacks dual-source blending, which this
    // pipeline's blend state requires.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    // Built but unused here; surface batches are macOS-only (see draw()).
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
85
/// Bind group layouts shared by all pipelines (group 0 = globals,
/// group 1 = per-batch data).
struct WgpuBindGroupLayouts {
    // GlobalParams + GammaParams uniforms.
    globals: wgpu::BindGroupLayout,
    // A single read-only storage buffer of instance data.
    instances: wgpu::BindGroupLayout,
    // Instance storage buffer plus an atlas texture and sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    // SurfaceParams uniform plus two planes and a sampler (video surfaces).
    surfaces: wgpu::BindGroupLayout,
}
92
/// wgpu-backed renderer for a single window surface.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    // Sprite/glyph atlas shared with the text system.
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    // Single uniform buffer holding three sections: GlobalParams at offset 0,
    // a path-specific GlobalParams at `path_globals_offset`, and GammaParams
    // at `gamma_offset`; offsets honor min_uniform_buffer_offset_alignment.
    globals_buffer: wgpu::Buffer,
    path_globals_offset: u64,
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    // Shared storage buffer for per-primitive instance data; when a frame's
    // instances overflow it, draw() grows the buffer and re-records the frame.
    instance_buffer: wgpu::Buffer,
    instance_buffer_capacity: u64,
    storage_buffer_alignment: u64,
    // Offscreen target (plus optional MSAA color buffer) that paths are
    // rasterized into; created lazily in ensure_intermediate_textures().
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    // Adapter supports dual-source blending (needed for subpixel sprites).
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    // Alpha modes resolved once at startup for transparent/opaque windows,
    // so update_transparency() can switch without re-querying capabilities.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    // Most recent uncaptured device error, recorded by the callback installed
    // in new_with_surface() and consumed once per draw().
    last_error: Arc<Mutex<Option<String>>>,
    // Consecutive frames that observed a GPU error; draw() panics past 20.
    failed_frame_count: u32,
}
123
124impl WgpuRenderer {
125 /// Creates a new WgpuRenderer from raw window handles.
126 ///
127 /// # Safety
128 /// The caller must ensure that the window handle remains valid for the lifetime
129 /// of the returned renderer.
130 pub fn new<W: HasWindowHandle + HasDisplayHandle>(
131 gpu_context: &mut Option<WgpuContext>,
132 window: &W,
133 config: WgpuSurfaceConfig,
134 compositor_gpu: Option<CompositorGpuHint>,
135 ) -> anyhow::Result<Self> {
136 let window_handle = window
137 .window_handle()
138 .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
139 let display_handle = window
140 .display_handle()
141 .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;
142
143 let target = wgpu::SurfaceTargetUnsafe::RawHandle {
144 raw_display_handle: display_handle.as_raw(),
145 raw_window_handle: window_handle.as_raw(),
146 };
147
148 // Use the existing context's instance if available, otherwise create a new one.
149 // The surface must be created with the same instance that will be used for
150 // adapter selection, otherwise wgpu will panic.
151 let instance = gpu_context
152 .as_ref()
153 .map(|ctx| ctx.instance.clone())
154 .unwrap_or_else(WgpuContext::instance);
155
156 // Safety: The caller guarantees that the window handle is valid for the
157 // lifetime of this renderer. In practice, the RawWindow struct is created
158 // from the native window handles and the surface is dropped before the window.
159 let surface = unsafe {
160 instance
161 .create_surface_unsafe(target)
162 .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
163 };
164
165 let context = match gpu_context {
166 Some(context) => {
167 context.check_compatible_with_surface(&surface)?;
168 context
169 }
170 None => gpu_context.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
171 };
172
173 Self::new_with_surface(context, surface, config)
174 }
175
    /// Builds the renderer on top of an already-created, compatible surface.
    ///
    /// Chooses a surface format and alpha modes from the surface's reported
    /// capabilities, configures the swapchain, compiles all pipelines, and
    /// allocates the shared uniform and instance buffers.
    fn new_with_surface(
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer well-known non-sRGB 8-bit formats; otherwise fall back to any
        // non-sRGB format, then to whatever the surface lists first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first alpha mode from `preferences` that the surface
        // supports, falling back to the first mode the surface reports at all.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Resolve both alpha modes up front so update_transparency() can
        // switch later without re-querying surface capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to what the device can actually allocate.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            // Vsync; Fifo is the one mode guaranteed to be supported.
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Lay out three sections in one uniform buffer: globals, path globals,
        // gamma params — each offset aligned per device requirements.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Two bind groups over the same buffer: one reading the main globals,
        // one reading the path globals; both share the gamma section.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured device errors; draw() consumes them once per
        // frame and panics after too many consecutive failures.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        Ok(Self {
            device,
            queue,
            surface,
            surface_config,
            pipelines,
            bind_group_layouts,
            atlas,
            atlas_sampler,
            globals_buffer,
            path_globals_offset,
            gamma_offset,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            storage_buffer_alignment,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
        })
    }
392
    /// Creates the bind group layouts shared by all pipelines.
    ///
    /// `globals`: GlobalParams + GammaParams uniforms (group 0 everywhere).
    /// `instances`: one read-only storage buffer of instance data.
    /// `instances_with_texture`: instances plus an atlas texture and sampler.
    /// `surfaces`: SurfaceParams uniform plus two texture planes and a sampler
    /// (for the currently unused surface/video pipeline).
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        // Gamma params are only read when shading text.
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Shared shape for the read-only instance storage buffer bindings.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                // Two texture bindings — presumably the luma/chroma planes of
                // a video frame; confirm against the surface shader.
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
516
    /// Compiles every render pipeline from the embedded WGSL shader module.
    ///
    /// Blend state depends on `alpha_mode` (so pipelines are rebuilt when
    /// transparency changes), and the path rasterization pipeline uses
    /// `path_sample_count` for MSAA. The subpixel sprite pipeline is only
    /// built when `dual_source_blending` is available, since its blend
    /// factors reference a second fragment output (Src1).
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });

        // Match the blend math to how the compositor interprets our alpha.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // All pipelines share the same shape: no vertex buffers (vertices are
        // synthesized from instance data in the vertex shader), no depth, and
        // a two-group layout of [globals, per-batch data].
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Draws into the intermediate texture, which always holds
        // premultiplied alpha regardless of the surface alpha mode.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
        );

        // Composites the intermediate texture onto the frame:
        // color = src + dst * (1 - src.a), alpha accumulated additively.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let subpixel_sprites = if dual_source_blending {
            // Src1/OneMinusSrc1 blend against the shader's second output,
            // giving a per-channel coverage mask for subpixel text.
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    // Only the color channels are written for subpixel text.
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
746
747 fn create_path_intermediate(
748 device: &wgpu::Device,
749 format: wgpu::TextureFormat,
750 width: u32,
751 height: u32,
752 ) -> (wgpu::Texture, wgpu::TextureView) {
753 let texture = device.create_texture(&wgpu::TextureDescriptor {
754 label: Some("path_intermediate"),
755 size: wgpu::Extent3d {
756 width: width.max(1),
757 height: height.max(1),
758 depth_or_array_layers: 1,
759 },
760 mip_level_count: 1,
761 sample_count: 1,
762 dimension: wgpu::TextureDimension::D2,
763 format,
764 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
765 view_formats: &[],
766 });
767 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
768 (texture, view)
769 }
770
771 fn create_msaa_if_needed(
772 device: &wgpu::Device,
773 format: wgpu::TextureFormat,
774 width: u32,
775 height: u32,
776 sample_count: u32,
777 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
778 if sample_count <= 1 {
779 return None;
780 }
781 let texture = device.create_texture(&wgpu::TextureDescriptor {
782 label: Some("path_msaa"),
783 size: wgpu::Extent3d {
784 width: width.max(1),
785 height: height.max(1),
786 depth_or_array_layers: 1,
787 },
788 mip_level_count: 1,
789 sample_count,
790 dimension: wgpu::TextureDimension::D2,
791 format,
792 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
793 view_formats: &[],
794 });
795 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
796 Some((texture, view))
797 }
798
799 pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
800 let width = size.width.0 as u32;
801 let height = size.height.0 as u32;
802
803 if width != self.surface_config.width || height != self.surface_config.height {
804 let clamped_width = width.min(self.max_texture_size);
805 let clamped_height = height.min(self.max_texture_size);
806
807 if clamped_width != width || clamped_height != height {
808 warn!(
809 "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
810 Clamping to ({}, {}). Window content may not fill the entire window.",
811 width, height, self.max_texture_size, clamped_width, clamped_height
812 );
813 }
814
815 // Wait for any in-flight GPU work to complete before destroying textures
816 if let Err(e) = self.device.poll(wgpu::PollType::Wait {
817 submission_index: None,
818 timeout: None,
819 }) {
820 warn!("Failed to poll device during resize: {e:?}");
821 }
822
823 // Destroy old textures before allocating new ones to avoid GPU memory spikes
824 if let Some(ref texture) = self.path_intermediate_texture {
825 texture.destroy();
826 }
827 if let Some(ref texture) = self.path_msaa_texture {
828 texture.destroy();
829 }
830
831 self.surface_config.width = clamped_width.max(1);
832 self.surface_config.height = clamped_height.max(1);
833 self.surface.configure(&self.device, &self.surface_config);
834
835 // Invalidate intermediate textures - they will be lazily recreated
836 // in draw() after we confirm the surface is healthy. This avoids
837 // panics when the device/surface is in an invalid state during resize.
838 self.path_intermediate_texture = None;
839 self.path_intermediate_view = None;
840 self.path_msaa_texture = None;
841 self.path_msaa_view = None;
842 }
843 }
844
845 fn ensure_intermediate_textures(&mut self) {
846 if self.path_intermediate_texture.is_some() {
847 return;
848 }
849
850 let (path_intermediate_texture, path_intermediate_view) = {
851 let (t, v) = Self::create_path_intermediate(
852 &self.device,
853 self.surface_config.format,
854 self.surface_config.width,
855 self.surface_config.height,
856 );
857 (Some(t), Some(v))
858 };
859 self.path_intermediate_texture = path_intermediate_texture;
860 self.path_intermediate_view = path_intermediate_view;
861
862 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
863 &self.device,
864 self.surface_config.format,
865 self.surface_config.width,
866 self.surface_config.height,
867 self.rendering_params.path_sample_count,
868 )
869 .map(|(t, v)| (Some(t), Some(v)))
870 .unwrap_or((None, None));
871 self.path_msaa_texture = path_msaa_texture;
872 self.path_msaa_view = path_msaa_view;
873 }
874
875 pub fn update_transparency(&mut self, transparent: bool) {
876 let new_alpha_mode = if transparent {
877 self.transparent_alpha_mode
878 } else {
879 self.opaque_alpha_mode
880 };
881
882 if new_alpha_mode != self.surface_config.alpha_mode {
883 self.surface_config.alpha_mode = new_alpha_mode;
884 self.surface.configure(&self.device, &self.surface_config);
885 self.pipelines = Self::create_pipelines(
886 &self.device,
887 &self.bind_group_layouts,
888 self.surface_config.format,
889 self.surface_config.alpha_mode,
890 self.rendering_params.path_sample_count,
891 self.dual_source_blending,
892 );
893 }
894 }
895
896 #[allow(dead_code)]
897 pub fn viewport_size(&self) -> Size<DevicePixels> {
898 Size {
899 width: DevicePixels(self.surface_config.width as i32),
900 height: DevicePixels(self.surface_config.height as i32),
901 }
902 }
903
    /// The texture atlas used for sprite (glyph/image) uploads.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
907
908 pub fn gpu_specs(&self) -> GpuSpecs {
909 GpuSpecs {
910 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
911 device_name: self.adapter_info.name.clone(),
912 driver_name: self.adapter_info.driver.clone(),
913 driver_info: self.adapter_info.driver_info.clone(),
914 }
915 }
916
    /// Maximum 2D texture dimension supported by the device, in pixels.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
920
    /// Renders one frame of `scene` to the surface.
    ///
    /// Acquires the next swapchain image, uploads the per-frame uniforms,
    /// then records the scene's batches into render passes. Path batches
    /// interrupt the main pass to rasterize into the intermediate texture.
    /// If the shared instance buffer overflows mid-frame, the encoder is
    /// discarded, the buffer grown, and the entire frame re-recorded.
    pub fn draw(&mut self, scene: &Scene) {
        // Uncaptured GPU errors arrive asynchronously via the callback
        // installed at construction; surface them here, once per frame.
        let last_error = self.last_error.lock().unwrap().take();
        if let Some(error) = last_error {
            self.failed_frame_count += 1;
            log::error!(
                "GPU error during frame (failure {} of 20): {error}",
                self.failed_frame_count
            );
            if self.failed_frame_count > 20 {
                panic!("Too many consecutive GPU errors. Last error: {error}");
            }
        } else {
            // A clean frame resets the consecutive-failure counter.
            self.failed_frame_count = 0;
        }

        self.atlas.before_frame();

        let frame = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            // Lost/Outdated are recoverable: reconfigure, skip this frame,
            // and let the next draw try again.
            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                self.surface.configure(&self.device, &self.surface_config);
                return;
            }
            Err(e) => {
                log::error!("Failed to acquire surface texture: {e}");
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // Path rasterization always targets the intermediate texture, which
        // is treated as straight-alpha here regardless of the surface mode.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload all three uniform sections for this frame.
        self.queue
            .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
        self.queue.write_buffer(
            &self.globals_buffer,
            self.path_globals_offset,
            bytemuck::bytes_of(&path_globals),
        );
        self.queue.write_buffer(
            &self.globals_buffer,
            self.gamma_offset,
            bytemuck::bytes_of(&gamma_params),
        );

        // Record the frame; on instance-buffer overflow, grow and re-record.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder = self
                .device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("main_encoder"),
                });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw_* returns false when the instance buffer is full.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths need their own pass targeting the
                            // intermediate texture, so end the main pass,
                            // rasterize, then resume with LoadOp::Load to
                            // keep what was already drawn.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Discard the partially recorded frame, grow the buffer, and
                // re-record from scratch — unless growth is capped, in which
                // case present whatever we have rather than looping forever.
                drop(encoder);
                if self.instance_buffer_capacity >= 256 * 1024 * 1024 {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.queue.submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1130
1131 fn draw_quads(
1132 &self,
1133 quads: &[Quad],
1134 instance_offset: &mut u64,
1135 pass: &mut wgpu::RenderPass<'_>,
1136 ) -> bool {
1137 let data = unsafe { Self::instance_bytes(quads) };
1138 self.draw_instances(
1139 data,
1140 quads.len() as u32,
1141 &self.pipelines.quads,
1142 instance_offset,
1143 pass,
1144 )
1145 }
1146
1147 fn draw_shadows(
1148 &self,
1149 shadows: &[Shadow],
1150 instance_offset: &mut u64,
1151 pass: &mut wgpu::RenderPass<'_>,
1152 ) -> bool {
1153 let data = unsafe { Self::instance_bytes(shadows) };
1154 self.draw_instances(
1155 data,
1156 shadows.len() as u32,
1157 &self.pipelines.shadows,
1158 instance_offset,
1159 pass,
1160 )
1161 }
1162
1163 fn draw_underlines(
1164 &self,
1165 underlines: &[Underline],
1166 instance_offset: &mut u64,
1167 pass: &mut wgpu::RenderPass<'_>,
1168 ) -> bool {
1169 let data = unsafe { Self::instance_bytes(underlines) };
1170 self.draw_instances(
1171 data,
1172 underlines.len() as u32,
1173 &self.pipelines.underlines,
1174 instance_offset,
1175 pass,
1176 )
1177 }
1178
1179 fn draw_monochrome_sprites(
1180 &self,
1181 sprites: &[MonochromeSprite],
1182 texture_id: AtlasTextureId,
1183 instance_offset: &mut u64,
1184 pass: &mut wgpu::RenderPass<'_>,
1185 ) -> bool {
1186 let tex_info = self.atlas.get_texture_info(texture_id);
1187 let data = unsafe { Self::instance_bytes(sprites) };
1188 self.draw_instances_with_texture(
1189 data,
1190 sprites.len() as u32,
1191 &tex_info.view,
1192 &self.pipelines.mono_sprites,
1193 instance_offset,
1194 pass,
1195 )
1196 }
1197
1198 fn draw_subpixel_sprites(
1199 &self,
1200 sprites: &[SubpixelSprite],
1201 texture_id: AtlasTextureId,
1202 instance_offset: &mut u64,
1203 pass: &mut wgpu::RenderPass<'_>,
1204 ) -> bool {
1205 let tex_info = self.atlas.get_texture_info(texture_id);
1206 let data = unsafe { Self::instance_bytes(sprites) };
1207 let pipeline = self
1208 .pipelines
1209 .subpixel_sprites
1210 .as_ref()
1211 .unwrap_or(&self.pipelines.mono_sprites);
1212 self.draw_instances_with_texture(
1213 data,
1214 sprites.len() as u32,
1215 &tex_info.view,
1216 pipeline,
1217 instance_offset,
1218 pass,
1219 )
1220 }
1221
1222 fn draw_polychrome_sprites(
1223 &self,
1224 sprites: &[PolychromeSprite],
1225 texture_id: AtlasTextureId,
1226 instance_offset: &mut u64,
1227 pass: &mut wgpu::RenderPass<'_>,
1228 ) -> bool {
1229 let tex_info = self.atlas.get_texture_info(texture_id);
1230 let data = unsafe { Self::instance_bytes(sprites) };
1231 self.draw_instances_with_texture(
1232 data,
1233 sprites.len() as u32,
1234 &tex_info.view,
1235 &self.pipelines.poly_sprites,
1236 instance_offset,
1237 pass,
1238 )
1239 }
1240
1241 fn draw_instances(
1242 &self,
1243 data: &[u8],
1244 instance_count: u32,
1245 pipeline: &wgpu::RenderPipeline,
1246 instance_offset: &mut u64,
1247 pass: &mut wgpu::RenderPass<'_>,
1248 ) -> bool {
1249 if instance_count == 0 {
1250 return true;
1251 }
1252 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1253 return false;
1254 };
1255 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1256 label: None,
1257 layout: &self.bind_group_layouts.instances,
1258 entries: &[wgpu::BindGroupEntry {
1259 binding: 0,
1260 resource: self.instance_binding(offset, size),
1261 }],
1262 });
1263 pass.set_pipeline(pipeline);
1264 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1265 pass.set_bind_group(1, &bind_group, &[]);
1266 pass.draw(0..4, 0..instance_count);
1267 true
1268 }
1269
1270 fn draw_instances_with_texture(
1271 &self,
1272 data: &[u8],
1273 instance_count: u32,
1274 texture_view: &wgpu::TextureView,
1275 pipeline: &wgpu::RenderPipeline,
1276 instance_offset: &mut u64,
1277 pass: &mut wgpu::RenderPass<'_>,
1278 ) -> bool {
1279 if instance_count == 0 {
1280 return true;
1281 }
1282 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1283 return false;
1284 };
1285 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1286 label: None,
1287 layout: &self.bind_group_layouts.instances_with_texture,
1288 entries: &[
1289 wgpu::BindGroupEntry {
1290 binding: 0,
1291 resource: self.instance_binding(offset, size),
1292 },
1293 wgpu::BindGroupEntry {
1294 binding: 1,
1295 resource: wgpu::BindingResource::TextureView(texture_view),
1296 },
1297 wgpu::BindGroupEntry {
1298 binding: 2,
1299 resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1300 },
1301 ],
1302 });
1303 pass.set_pipeline(pipeline);
1304 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1305 pass.set_bind_group(1, &bind_group, &[]);
1306 pass.draw(0..4, 0..instance_count);
1307 true
1308 }
1309
    /// Reinterprets a slice of instance structs as raw bytes for upload to
    /// the GPU instance buffer.
    ///
    /// # Safety
    ///
    /// The returned slice aliases `instances` for its full byte length.
    /// Callers must only pass types whose bytes are safe to read as plain
    /// data — NOTE(review): several instance types here derive only
    /// `Clone`, not `bytemuck::Pod`; confirm each `T` used has no
    /// uninitialized padding bytes before uploading.
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        // SAFETY: the pointer and byte length describe exactly the memory
        // of `instances`, and the output borrow is tied to the input slice,
        // so the slice cannot outlive its backing storage.
        unsafe {
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1318
1319 fn draw_paths_from_intermediate(
1320 &self,
1321 paths: &[Path<ScaledPixels>],
1322 instance_offset: &mut u64,
1323 pass: &mut wgpu::RenderPass<'_>,
1324 ) -> bool {
1325 let first_path = &paths[0];
1326 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1327 {
1328 paths
1329 .iter()
1330 .map(|p| PathSprite {
1331 bounds: p.clipped_bounds(),
1332 })
1333 .collect()
1334 } else {
1335 let mut bounds = first_path.clipped_bounds();
1336 for path in paths.iter().skip(1) {
1337 bounds = bounds.union(&path.clipped_bounds());
1338 }
1339 vec![PathSprite { bounds }]
1340 };
1341
1342 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1343 return true;
1344 };
1345
1346 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1347 self.draw_instances_with_texture(
1348 sprite_data,
1349 sprites.len() as u32,
1350 path_intermediate_view,
1351 &self.pipelines.paths,
1352 instance_offset,
1353 pass,
1354 )
1355 }
1356
1357 fn draw_paths_to_intermediate(
1358 &self,
1359 encoder: &mut wgpu::CommandEncoder,
1360 paths: &[Path<ScaledPixels>],
1361 instance_offset: &mut u64,
1362 ) -> bool {
1363 let mut vertices = Vec::new();
1364 for path in paths {
1365 let bounds = path.clipped_bounds();
1366 vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
1367 xy_position: v.xy_position,
1368 st_position: v.st_position,
1369 color: path.color,
1370 bounds,
1371 }));
1372 }
1373
1374 if vertices.is_empty() {
1375 return true;
1376 }
1377
1378 let vertex_data = unsafe { Self::instance_bytes(&vertices) };
1379 let Some((vertex_offset, vertex_size)) =
1380 self.write_to_instance_buffer(instance_offset, vertex_data)
1381 else {
1382 return false;
1383 };
1384
1385 let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1386 label: Some("path_rasterization_bind_group"),
1387 layout: &self.bind_group_layouts.instances,
1388 entries: &[wgpu::BindGroupEntry {
1389 binding: 0,
1390 resource: self.instance_binding(vertex_offset, vertex_size),
1391 }],
1392 });
1393
1394 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1395 return true;
1396 };
1397
1398 let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
1399 (msaa_view, Some(path_intermediate_view))
1400 } else {
1401 (path_intermediate_view, None)
1402 };
1403
1404 {
1405 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1406 label: Some("path_rasterization_pass"),
1407 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1408 view: target_view,
1409 resolve_target,
1410 ops: wgpu::Operations {
1411 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1412 store: wgpu::StoreOp::Store,
1413 },
1414 depth_slice: None,
1415 })],
1416 depth_stencil_attachment: None,
1417 ..Default::default()
1418 });
1419
1420 pass.set_pipeline(&self.pipelines.path_rasterization);
1421 pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
1422 pass.set_bind_group(1, &data_bind_group, &[]);
1423 pass.draw(0..vertices.len() as u32, 0..1);
1424 }
1425
1426 true
1427 }
1428
1429 fn grow_instance_buffer(&mut self) {
1430 let new_capacity = self.instance_buffer_capacity * 2;
1431 log::info!("increased instance buffer size to {}", new_capacity);
1432 self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1433 label: Some("instance_buffer"),
1434 size: new_capacity,
1435 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1436 mapped_at_creation: false,
1437 });
1438 self.instance_buffer_capacity = new_capacity;
1439 }
1440
1441 fn write_to_instance_buffer(
1442 &self,
1443 instance_offset: &mut u64,
1444 data: &[u8],
1445 ) -> Option<(u64, NonZeroU64)> {
1446 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1447 let size = (data.len() as u64).max(16);
1448 if offset + size > self.instance_buffer_capacity {
1449 return None;
1450 }
1451 self.queue.write_buffer(&self.instance_buffer, offset, data);
1452 *instance_offset = offset + size;
1453 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1454 }
1455
1456 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1457 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1458 buffer: &self.instance_buffer,
1459 offset,
1460 size: Some(size),
1461 })
1462 }
1463
    /// Releases renderer resources.
    ///
    /// Intentionally a no-op: every wgpu object held by this struct is
    /// reference-counted and freed when the struct itself is dropped.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1467}
1468
/// Rendering tunables resolved once at renderer construction from adapter
/// capabilities and `ZED_FONTS_*` environment variables.
struct RenderingParameters {
    // MSAA sample count for path rasterization (4, 2, or 1, depending on
    // what the surface format supports).
    path_sample_count: u32,
    // Gamma-correction coefficients for text blending, derived from the
    // ZED_FONTS_GAMMA environment variable.
    gamma_ratios: [f32; 4],
    // Extra contrast applied to grayscale-antialiased text.
    grayscale_enhanced_contrast: f32,
    // Extra contrast applied to subpixel-antialiased text.
    subpixel_enhanced_contrast: f32,
}
1475
1476impl RenderingParameters {
1477 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1478 use std::env;
1479
1480 let format_features = adapter.get_texture_format_features(surface_format);
1481 let path_sample_count = [4, 2, 1]
1482 .into_iter()
1483 .find(|&n| format_features.flags.sample_count_supported(n))
1484 .unwrap_or(1);
1485
1486 let gamma = env::var("ZED_FONTS_GAMMA")
1487 .ok()
1488 .and_then(|v| v.parse().ok())
1489 .unwrap_or(1.8_f32)
1490 .clamp(1.0, 2.2);
1491 let gamma_ratios = get_gamma_correction_ratios(gamma);
1492
1493 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1494 .ok()
1495 .and_then(|v| v.parse().ok())
1496 .unwrap_or(1.0_f32)
1497 .max(0.0);
1498
1499 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1500 .ok()
1501 .and_then(|v| v.parse().ok())
1502 .unwrap_or(0.5_f32)
1503 .max(0.0);
1504
1505 Self {
1506 path_sample_count,
1507 gamma_ratios,
1508 grayscale_enhanced_contrast,
1509 subpixel_enhanced_contrast,
1510 }
1511 }
1512}