1use crate::{WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use log::warn;
9use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
10use std::num::NonZeroU64;
11use std::sync::{Arc, Mutex};
12
/// Per-frame shader globals, uploaded into `globals_buffer` and bound at
/// group 0, binding 0. `#[repr(C)]` + `Pod` so it can be byte-copied to the
/// GPU; the layout is expected to match the corresponding WGSL struct in
/// `shaders.wgsl` — TODO confirm when touching either side.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    /// Surface size as `[width, height]` in pixels.
    viewport_size: [f32; 2],
    /// 1 when the surface composites with premultiplied alpha, 0 otherwise.
    premultiplied_alpha: u32,
    /// Explicit padding so the struct size matches the shader-side layout.
    pad: u32,
}
20
/// GPU-uploadable mirror of `Bounds<ScaledPixels>` as plain `f32` pairs.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    /// Origin as `[x, y]` in scaled pixels.
    origin: [f32; 2],
    /// Extent as `[width, height]` in scaled pixels.
    size: [f32; 2],
}
27
28impl From<Bounds<ScaledPixels>> for PodBounds {
29 fn from(bounds: Bounds<ScaledPixels>) -> Self {
30 Self {
31 origin: [bounds.origin.x.0, bounds.origin.y.0],
32 size: [bounds.size.width.0, bounds.size.height.0],
33 }
34 }
35}
36
/// Uniform data for the `surfaces` pipeline (see `WgpuPipelines::surfaces`).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    /// Destination rectangle of the surface on screen.
    bounds: PodBounds,
    /// Content-mask (clip) rectangle applied when compositing.
    content_mask: PodBounds,
}
43
/// Text gamma/contrast correction parameters, bound at group 0, binding 1.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    /// Gamma-correction ratios — presumably produced by
    /// `get_gamma_correction_ratios` (imported above); values come from
    /// `RenderingParameters`, defined outside this chunk.
    gamma_ratios: [f32; 4],
    /// Extra contrast applied to grayscale (non-subpixel) text.
    grayscale_enhanced_contrast: f32,
    /// Extra contrast applied to subpixel-rendered text.
    subpixel_enhanced_contrast: f32,
    /// Padding to satisfy uniform-buffer size/alignment rules.
    _pad: [f32; 2],
}
52
/// Per-path instance data for the pass that composites rasterized paths
/// back onto the frame. NOTE(review): its use site
/// (`draw_paths_from_intermediate`) is outside this chunk — confirm there.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    /// Screen-space bounds of the path, in scaled pixels.
    bounds: Bounds<ScaledPixels>,
}
58
/// Vertex format for the offscreen path-rasterization pass.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    /// Vertex position in scaled-pixel (screen) coordinates.
    xy_position: Point<ScaledPixels>,
    /// Auxiliary `st` coordinates — assumed to be curve-space coordinates
    /// used by the fragment shader; TODO confirm against `shaders.wgsl`.
    st_position: Point<f32>,
    /// Fill color/background of the path.
    color: Background,
    /// Bounds of the whole path this vertex belongs to.
    bounds: Bounds<ScaledPixels>,
}
67
/// Initial window-surface settings requested by the caller of [`WgpuRenderer::new`].
pub struct WgpuSurfaceConfig {
    /// Requested drawable size in device pixels (clamped to GPU limits).
    pub size: Size<DevicePixels>,
    /// Whether the window wants an alpha-composited (transparent) surface.
    pub transparent: bool,
}
72
/// One render pipeline per primitive kind drawn by the renderer.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    /// Offscreen pass rasterizing path geometry (possibly multisampled).
    path_rasterization: wgpu::RenderPipeline,
    /// Composites the rasterized path texture onto the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    /// Subpixel text; built only when dual-source blending is supported.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    // Surfaces (video playback) are macOS-only; kept for layout parity.
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
85
/// Bind-group layouts shared across pipelines (built once in
/// `create_bind_group_layouts`).
struct WgpuBindGroupLayouts {
    /// Group 0: `GlobalParams` + `GammaParams` uniform bindings.
    globals: wgpu::BindGroupLayout,
    /// Group 1: a single read-only instance storage buffer.
    instances: wgpu::BindGroupLayout,
    /// Group 1: instance buffer plus a sampled texture and sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    /// Group 1 for the `surfaces` pipeline: uniform + two textures + sampler.
    surfaces: wgpu::BindGroupLayout,
}
92
/// wgpu-based renderer for a single window: owns the swapchain surface, all
/// pipelines, and the buffers scene data is streamed into each frame.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    // Texture atlas for cached sprite/glyph data.
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    // One uniform buffer packing GlobalParams (offset 0), a second
    // GlobalParams for the path pass, and GammaParams at aligned offsets.
    globals_buffer: wgpu::Buffer,
    path_globals_offset: u64,
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    // Growable storage buffer all per-primitive instance data is packed into.
    instance_buffer: wgpu::Buffer,
    instance_buffer_capacity: u64,
    storage_buffer_alignment: u64,
    // Per-size offscreen targets for path rasterization; lazily (re)created
    // in ensure_intermediate_textures() and invalidated on resize.
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    // Alpha modes resolved at startup for each transparency setting so
    // update_transparency() can switch without re-querying capabilities.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    // Most recent uncaptured device error, written by the callback installed
    // in new(); checked and cleared at the top of draw().
    last_error: Arc<Mutex<Option<String>>>,
    failed_frame_count: u32,
}
123
124impl WgpuRenderer {
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// Reuses (or lazily creates) the shared [`WgpuContext`], picks a surface
    /// format and composite-alpha modes supported by the adapter, compiles
    /// all pipelines, and allocates the uniform and instance buffers.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    pub fn new<W: HasWindowHandle + HasDisplayHandle>(
        gpu_context: &mut Option<WgpuContext>,
        window: &W,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
        let display_handle = window
            .display_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            raw_display_handle: display_handle.as_raw(),
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        let instance = gpu_context
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(WgpuContext::instance);

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        let context = match gpu_context {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => gpu_context.insert(WgpuContext::new(instance, &surface)?),
        };

        // Format preference: common non-sRGB 8-bit formats first, then any
        // non-sRGB format, then whatever the surface offers.
        let surface_caps = surface.get_capabilities(&context.adapter);
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Resolve, up front, the alpha mode to use for each transparency
        // setting so update_transparency() can switch cheaply later.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to the device's maximum 2D texture
        // dimension before configuring the surface.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Pack three uniform blocks (globals, path globals, gamma) into one
        // buffer, each starting at a min_uniform_buffer_offset_alignment
        // boundary so they can be bound at offsets.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        // Same layout, but binding 0 points at the path pass's GlobalParams
        // copy (written with premultiplied alpha disabled — see draw()).
        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured device errors; draw() inspects this every frame
        // and bails out after too many consecutive failures.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        Ok(Self {
            device,
            queue,
            surface,
            surface_config,
            pipelines,
            bind_group_layouts,
            atlas,
            atlas_sampler,
            globals_buffer,
            path_globals_offset,
            gamma_offset,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            storage_buffer_alignment,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
        })
    }
381
    /// Builds the bind-group layouts shared by all pipelines:
    ///
    /// * `globals` — `GlobalParams` uniform (binding 0) + `GammaParams`
    ///   uniform (binding 1).
    /// * `instances` — one read-only storage buffer of instance data.
    /// * `instances_with_texture` — instance buffer plus a filterable
    ///   texture and sampler.
    /// * `surfaces` — `SurfaceParams` uniform plus two textures and a
    ///   sampler (used by the macOS-only surfaces pipeline).
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer visible to both shader stages; shared by
        // the two instance layouts below.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                // Two sampled textures — presumably the planes of a video
                // frame; confirm against the surface shader before changing.
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
505
    /// Compiles `shaders.wgsl` and builds one render pipeline per primitive.
    ///
    /// * `alpha_mode` selects the blend state used by most pipelines
    ///   (premultiplied vs. straight alpha).
    /// * `path_sample_count` is the MSAA count for the offscreen
    ///   path-rasterization pipeline only; everything else is single-sample.
    /// * `dual_source_blending` gates the subpixel-text pipeline, which
    ///   requires `Src1`/`OneMinusSrc1` blend factors.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });

        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Shared builder: every pipeline uses the same shader module, no
        // vertex buffers, no depth/stencil, and no culling.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Offscreen path pass: always premultiplied, possibly multisampled.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
        );

        // Compositing the (premultiplied) intermediate path texture onto the
        // frame: One/OneMinusSrcAlpha for color, additive alpha.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let subpixel_sprites = if dual_source_blending {
            // Dual-source blending: the fragment shader's second output
            // (Src1) supplies the blend factor; alpha channel is not written
            // (write_mask is COLOR only).
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
735
736 fn create_path_intermediate(
737 device: &wgpu::Device,
738 format: wgpu::TextureFormat,
739 width: u32,
740 height: u32,
741 ) -> (wgpu::Texture, wgpu::TextureView) {
742 let texture = device.create_texture(&wgpu::TextureDescriptor {
743 label: Some("path_intermediate"),
744 size: wgpu::Extent3d {
745 width: width.max(1),
746 height: height.max(1),
747 depth_or_array_layers: 1,
748 },
749 mip_level_count: 1,
750 sample_count: 1,
751 dimension: wgpu::TextureDimension::D2,
752 format,
753 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
754 view_formats: &[],
755 });
756 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
757 (texture, view)
758 }
759
760 fn create_msaa_if_needed(
761 device: &wgpu::Device,
762 format: wgpu::TextureFormat,
763 width: u32,
764 height: u32,
765 sample_count: u32,
766 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
767 if sample_count <= 1 {
768 return None;
769 }
770 let texture = device.create_texture(&wgpu::TextureDescriptor {
771 label: Some("path_msaa"),
772 size: wgpu::Extent3d {
773 width: width.max(1),
774 height: height.max(1),
775 depth_or_array_layers: 1,
776 },
777 mip_level_count: 1,
778 sample_count,
779 dimension: wgpu::TextureDimension::D2,
780 format,
781 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
782 view_formats: &[],
783 });
784 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
785 Some((texture, view))
786 }
787
788 pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
789 let width = size.width.0 as u32;
790 let height = size.height.0 as u32;
791
792 if width != self.surface_config.width || height != self.surface_config.height {
793 let clamped_width = width.min(self.max_texture_size);
794 let clamped_height = height.min(self.max_texture_size);
795
796 if clamped_width != width || clamped_height != height {
797 warn!(
798 "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
799 Clamping to ({}, {}). Window content may not fill the entire window.",
800 width, height, self.max_texture_size, clamped_width, clamped_height
801 );
802 }
803
804 // Wait for any in-flight GPU work to complete before destroying textures
805 if let Err(e) = self.device.poll(wgpu::PollType::Wait {
806 submission_index: None,
807 timeout: None,
808 }) {
809 warn!("Failed to poll device during resize: {e:?}");
810 }
811
812 // Destroy old textures before allocating new ones to avoid GPU memory spikes
813 if let Some(ref texture) = self.path_intermediate_texture {
814 texture.destroy();
815 }
816 if let Some(ref texture) = self.path_msaa_texture {
817 texture.destroy();
818 }
819
820 self.surface_config.width = clamped_width.max(1);
821 self.surface_config.height = clamped_height.max(1);
822 self.surface.configure(&self.device, &self.surface_config);
823
824 // Invalidate intermediate textures - they will be lazily recreated
825 // in draw() after we confirm the surface is healthy. This avoids
826 // panics when the device/surface is in an invalid state during resize.
827 self.path_intermediate_texture = None;
828 self.path_intermediate_view = None;
829 self.path_msaa_texture = None;
830 self.path_msaa_view = None;
831 }
832 }
833
834 fn ensure_intermediate_textures(&mut self) {
835 if self.path_intermediate_texture.is_some() {
836 return;
837 }
838
839 let (path_intermediate_texture, path_intermediate_view) = {
840 let (t, v) = Self::create_path_intermediate(
841 &self.device,
842 self.surface_config.format,
843 self.surface_config.width,
844 self.surface_config.height,
845 );
846 (Some(t), Some(v))
847 };
848 self.path_intermediate_texture = path_intermediate_texture;
849 self.path_intermediate_view = path_intermediate_view;
850
851 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
852 &self.device,
853 self.surface_config.format,
854 self.surface_config.width,
855 self.surface_config.height,
856 self.rendering_params.path_sample_count,
857 )
858 .map(|(t, v)| (Some(t), Some(v)))
859 .unwrap_or((None, None));
860 self.path_msaa_texture = path_msaa_texture;
861 self.path_msaa_view = path_msaa_view;
862 }
863
864 pub fn update_transparency(&mut self, transparent: bool) {
865 let new_alpha_mode = if transparent {
866 self.transparent_alpha_mode
867 } else {
868 self.opaque_alpha_mode
869 };
870
871 if new_alpha_mode != self.surface_config.alpha_mode {
872 self.surface_config.alpha_mode = new_alpha_mode;
873 self.surface.configure(&self.device, &self.surface_config);
874 self.pipelines = Self::create_pipelines(
875 &self.device,
876 &self.bind_group_layouts,
877 self.surface_config.format,
878 self.surface_config.alpha_mode,
879 self.rendering_params.path_sample_count,
880 self.dual_source_blending,
881 );
882 }
883 }
884
885 #[allow(dead_code)]
886 pub fn viewport_size(&self) -> Size<DevicePixels> {
887 Size {
888 width: DevicePixels(self.surface_config.width as i32),
889 height: DevicePixels(self.surface_config.height as i32),
890 }
891 }
892
    /// Shared handle to this renderer's sprite atlas.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
896
897 pub fn gpu_specs(&self) -> GpuSpecs {
898 GpuSpecs {
899 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
900 device_name: self.adapter_info.name.clone(),
901 driver_name: self.adapter_info.driver.clone(),
902 driver_info: self.adapter_info.driver_info.clone(),
903 }
904 }
905
    /// Maximum 2D texture dimension supported by the device; also the upper
    /// bound applied to surface sizes.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
909
    /// Renders `scene` into the next swapchain image and presents it.
    ///
    /// All per-primitive instance data is streamed into a single storage
    /// buffer; when a frame's data does not fit, the frame is re-encoded
    /// from scratch with a larger buffer (the outer `loop`), up to a hard
    /// capacity limit.
    pub fn draw(&mut self, scene: &Scene) {
        // Check whether a previous frame produced an uncaptured device error
        // (recorded by the callback installed in new()); give up after 20
        // consecutive failed frames.
        let last_error = self.last_error.lock().unwrap().take();
        if let Some(error) = last_error {
            self.failed_frame_count += 1;
            log::error!(
                "GPU error during frame (failure {} of 20): {error}",
                self.failed_frame_count
            );
            if self.failed_frame_count > 20 {
                panic!("Too many consecutive GPU errors. Last error: {error}");
            }
        } else {
            self.failed_frame_count = 0;
        }

        self.atlas.before_frame();

        let frame = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                // Reconfigure and skip this frame; the next draw() retries.
                self.surface.configure(&self.device, &self.surface_config);
                return;
            }
            Err(e) => {
                log::error!("Failed to acquire surface texture: {e}");
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // The offscreen path pass always runs with premultiplied_alpha = 0.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload all three uniform blocks into the shared globals buffer.
        self.queue
            .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
        self.queue.write_buffer(
            &self.globals_buffer,
            self.path_globals_offset,
            bytemuck::bytes_of(&path_globals),
        );
        self.queue.write_buffer(
            &self.globals_buffer,
            self.gamma_offset,
            bytemuck::bytes_of(&gamma_params),
        );

        // Encode the frame; on instance-buffer overflow, grow the buffer and
        // retry the whole frame.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder = self
                .device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("main_encoder"),
                });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw_* helper returns false when the instance buffer
                // is exhausted; that aborts the pass and triggers a retry.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths are rasterized in a separate offscreen
                            // pass, so the main pass must be suspended here
                            // and resumed below with LoadOp::Load.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Discard this frame's commands, grow the buffer, re-encode.
                drop(encoder);
                if self.instance_buffer_capacity >= 256 * 1024 * 1024 {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.queue.submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1119
1120 fn draw_quads(
1121 &self,
1122 quads: &[Quad],
1123 instance_offset: &mut u64,
1124 pass: &mut wgpu::RenderPass<'_>,
1125 ) -> bool {
1126 let data = unsafe { Self::instance_bytes(quads) };
1127 self.draw_instances(
1128 data,
1129 quads.len() as u32,
1130 &self.pipelines.quads,
1131 instance_offset,
1132 pass,
1133 )
1134 }
1135
1136 fn draw_shadows(
1137 &self,
1138 shadows: &[Shadow],
1139 instance_offset: &mut u64,
1140 pass: &mut wgpu::RenderPass<'_>,
1141 ) -> bool {
1142 let data = unsafe { Self::instance_bytes(shadows) };
1143 self.draw_instances(
1144 data,
1145 shadows.len() as u32,
1146 &self.pipelines.shadows,
1147 instance_offset,
1148 pass,
1149 )
1150 }
1151
1152 fn draw_underlines(
1153 &self,
1154 underlines: &[Underline],
1155 instance_offset: &mut u64,
1156 pass: &mut wgpu::RenderPass<'_>,
1157 ) -> bool {
1158 let data = unsafe { Self::instance_bytes(underlines) };
1159 self.draw_instances(
1160 data,
1161 underlines.len() as u32,
1162 &self.pipelines.underlines,
1163 instance_offset,
1164 pass,
1165 )
1166 }
1167
1168 fn draw_monochrome_sprites(
1169 &self,
1170 sprites: &[MonochromeSprite],
1171 texture_id: AtlasTextureId,
1172 instance_offset: &mut u64,
1173 pass: &mut wgpu::RenderPass<'_>,
1174 ) -> bool {
1175 let tex_info = self.atlas.get_texture_info(texture_id);
1176 let data = unsafe { Self::instance_bytes(sprites) };
1177 self.draw_instances_with_texture(
1178 data,
1179 sprites.len() as u32,
1180 &tex_info.view,
1181 &self.pipelines.mono_sprites,
1182 instance_offset,
1183 pass,
1184 )
1185 }
1186
1187 fn draw_subpixel_sprites(
1188 &self,
1189 sprites: &[SubpixelSprite],
1190 texture_id: AtlasTextureId,
1191 instance_offset: &mut u64,
1192 pass: &mut wgpu::RenderPass<'_>,
1193 ) -> bool {
1194 let tex_info = self.atlas.get_texture_info(texture_id);
1195 let data = unsafe { Self::instance_bytes(sprites) };
1196 let pipeline = self
1197 .pipelines
1198 .subpixel_sprites
1199 .as_ref()
1200 .unwrap_or(&self.pipelines.mono_sprites);
1201 self.draw_instances_with_texture(
1202 data,
1203 sprites.len() as u32,
1204 &tex_info.view,
1205 pipeline,
1206 instance_offset,
1207 pass,
1208 )
1209 }
1210
1211 fn draw_polychrome_sprites(
1212 &self,
1213 sprites: &[PolychromeSprite],
1214 texture_id: AtlasTextureId,
1215 instance_offset: &mut u64,
1216 pass: &mut wgpu::RenderPass<'_>,
1217 ) -> bool {
1218 let tex_info = self.atlas.get_texture_info(texture_id);
1219 let data = unsafe { Self::instance_bytes(sprites) };
1220 self.draw_instances_with_texture(
1221 data,
1222 sprites.len() as u32,
1223 &tex_info.view,
1224 &self.pipelines.poly_sprites,
1225 instance_offset,
1226 pass,
1227 )
1228 }
1229
1230 fn draw_instances(
1231 &self,
1232 data: &[u8],
1233 instance_count: u32,
1234 pipeline: &wgpu::RenderPipeline,
1235 instance_offset: &mut u64,
1236 pass: &mut wgpu::RenderPass<'_>,
1237 ) -> bool {
1238 if instance_count == 0 {
1239 return true;
1240 }
1241 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1242 return false;
1243 };
1244 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1245 label: None,
1246 layout: &self.bind_group_layouts.instances,
1247 entries: &[wgpu::BindGroupEntry {
1248 binding: 0,
1249 resource: self.instance_binding(offset, size),
1250 }],
1251 });
1252 pass.set_pipeline(pipeline);
1253 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1254 pass.set_bind_group(1, &bind_group, &[]);
1255 pass.draw(0..4, 0..instance_count);
1256 true
1257 }
1258
1259 fn draw_instances_with_texture(
1260 &self,
1261 data: &[u8],
1262 instance_count: u32,
1263 texture_view: &wgpu::TextureView,
1264 pipeline: &wgpu::RenderPipeline,
1265 instance_offset: &mut u64,
1266 pass: &mut wgpu::RenderPass<'_>,
1267 ) -> bool {
1268 if instance_count == 0 {
1269 return true;
1270 }
1271 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1272 return false;
1273 };
1274 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1275 label: None,
1276 layout: &self.bind_group_layouts.instances_with_texture,
1277 entries: &[
1278 wgpu::BindGroupEntry {
1279 binding: 0,
1280 resource: self.instance_binding(offset, size),
1281 },
1282 wgpu::BindGroupEntry {
1283 binding: 1,
1284 resource: wgpu::BindingResource::TextureView(texture_view),
1285 },
1286 wgpu::BindGroupEntry {
1287 binding: 2,
1288 resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1289 },
1290 ],
1291 });
1292 pass.set_pipeline(pipeline);
1293 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1294 pass.set_bind_group(1, &bind_group, &[]);
1295 pass.draw(0..4, 0..instance_count);
1296 true
1297 }
1298
    /// Reinterprets a slice of instance structs as raw bytes for upload to
    /// the GPU instance buffer.
    ///
    /// # Safety
    ///
    /// Callers must only pass types whose in-memory layout matches what the
    /// shaders expect (the instance types in this file are `#[repr(C)]`).
    /// NOTE(review): a `T` containing padding would expose uninitialized
    /// bytes through the returned `&[u8]` — confirm every instance type used
    /// here is padding-free (a `Pod` bound would enforce this).
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        unsafe {
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1307
1308 fn draw_paths_from_intermediate(
1309 &self,
1310 paths: &[Path<ScaledPixels>],
1311 instance_offset: &mut u64,
1312 pass: &mut wgpu::RenderPass<'_>,
1313 ) -> bool {
1314 let first_path = &paths[0];
1315 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1316 {
1317 paths
1318 .iter()
1319 .map(|p| PathSprite {
1320 bounds: p.clipped_bounds(),
1321 })
1322 .collect()
1323 } else {
1324 let mut bounds = first_path.clipped_bounds();
1325 for path in paths.iter().skip(1) {
1326 bounds = bounds.union(&path.clipped_bounds());
1327 }
1328 vec![PathSprite { bounds }]
1329 };
1330
1331 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1332 return true;
1333 };
1334
1335 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1336 self.draw_instances_with_texture(
1337 sprite_data,
1338 sprites.len() as u32,
1339 path_intermediate_view,
1340 &self.pipelines.paths,
1341 instance_offset,
1342 pass,
1343 )
1344 }
1345
1346 fn draw_paths_to_intermediate(
1347 &self,
1348 encoder: &mut wgpu::CommandEncoder,
1349 paths: &[Path<ScaledPixels>],
1350 instance_offset: &mut u64,
1351 ) -> bool {
1352 let mut vertices = Vec::new();
1353 for path in paths {
1354 let bounds = path.clipped_bounds();
1355 vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
1356 xy_position: v.xy_position,
1357 st_position: v.st_position,
1358 color: path.color,
1359 bounds,
1360 }));
1361 }
1362
1363 if vertices.is_empty() {
1364 return true;
1365 }
1366
1367 let vertex_data = unsafe { Self::instance_bytes(&vertices) };
1368 let Some((vertex_offset, vertex_size)) =
1369 self.write_to_instance_buffer(instance_offset, vertex_data)
1370 else {
1371 return false;
1372 };
1373
1374 let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1375 label: Some("path_rasterization_bind_group"),
1376 layout: &self.bind_group_layouts.instances,
1377 entries: &[wgpu::BindGroupEntry {
1378 binding: 0,
1379 resource: self.instance_binding(vertex_offset, vertex_size),
1380 }],
1381 });
1382
1383 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1384 return true;
1385 };
1386
1387 let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
1388 (msaa_view, Some(path_intermediate_view))
1389 } else {
1390 (path_intermediate_view, None)
1391 };
1392
1393 {
1394 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1395 label: Some("path_rasterization_pass"),
1396 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1397 view: target_view,
1398 resolve_target,
1399 ops: wgpu::Operations {
1400 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1401 store: wgpu::StoreOp::Store,
1402 },
1403 depth_slice: None,
1404 })],
1405 depth_stencil_attachment: None,
1406 ..Default::default()
1407 });
1408
1409 pass.set_pipeline(&self.pipelines.path_rasterization);
1410 pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
1411 pass.set_bind_group(1, &data_bind_group, &[]);
1412 pass.draw(0..vertices.len() as u32, 0..1);
1413 }
1414
1415 true
1416 }
1417
1418 fn grow_instance_buffer(&mut self) {
1419 let new_capacity = self.instance_buffer_capacity * 2;
1420 log::info!("increased instance buffer size to {}", new_capacity);
1421 self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1422 label: Some("instance_buffer"),
1423 size: new_capacity,
1424 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1425 mapped_at_creation: false,
1426 });
1427 self.instance_buffer_capacity = new_capacity;
1428 }
1429
1430 fn write_to_instance_buffer(
1431 &self,
1432 instance_offset: &mut u64,
1433 data: &[u8],
1434 ) -> Option<(u64, NonZeroU64)> {
1435 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1436 let size = (data.len() as u64).max(16);
1437 if offset + size > self.instance_buffer_capacity {
1438 return None;
1439 }
1440 self.queue.write_buffer(&self.instance_buffer, offset, data);
1441 *instance_offset = offset + size;
1442 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1443 }
1444
1445 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1446 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1447 buffer: &self.instance_buffer,
1448 offset,
1449 size: Some(size),
1450 })
1451 }
1452
    /// Releases renderer resources. wgpu objects are reference-counted and
    /// released when dropped, so no explicit teardown is required here.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1456}
1457
/// Rendering parameters derived from the adapter's capabilities and
/// environment-variable overrides (see `RenderingParameters::new`).
struct RenderingParameters {
    // MSAA sample count used for path rasterization (4, 2, or 1, whichever
    // the surface format supports).
    path_sample_count: u32,
    // Gamma-correction coefficients for text rendering.
    gamma_ratios: [f32; 4],
    // Contrast enhancement applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    // Contrast enhancement applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
}
1464
1465impl RenderingParameters {
1466 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1467 use std::env;
1468
1469 let format_features = adapter.get_texture_format_features(surface_format);
1470 let path_sample_count = [4, 2, 1]
1471 .into_iter()
1472 .find(|&n| format_features.flags.sample_count_supported(n))
1473 .unwrap_or(1);
1474
1475 let gamma = env::var("ZED_FONTS_GAMMA")
1476 .ok()
1477 .and_then(|v| v.parse().ok())
1478 .unwrap_or(1.8_f32)
1479 .clamp(1.0, 2.2);
1480 let gamma_ratios = get_gamma_correction_ratios(gamma);
1481
1482 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1483 .ok()
1484 .and_then(|v| v.parse().ok())
1485 .unwrap_or(1.0_f32)
1486 .max(0.0);
1487
1488 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1489 .ok()
1490 .and_then(|v| v.parse().ok())
1491 .unwrap_or(0.5_f32)
1492 .max(0.0);
1493
1494 Self {
1495 path_sample_count,
1496 gamma_ratios,
1497 grayscale_enhanced_contrast,
1498 subpixel_enhanced_contrast,
1499 }
1500 }
1501}