1#[cfg(not(target_family = "wasm"))]
2use crate::CompositorGpuHint;
3use crate::{WgpuAtlas, WgpuContext};
4use bytemuck::{Pod, Zeroable};
5use gpui::{
6 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
7 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
8 Underline, get_gamma_correction_ratios,
9};
10use log::warn;
11use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
12use std::num::NonZeroU64;
13use std::sync::{Arc, Mutex};
14
/// Per-frame uniform data shared by all shaders.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    /// Viewport size in pixels (taken from the surface configuration).
    viewport_size: [f32; 2],
    /// 1 when the surface composites with premultiplied alpha, 0 otherwise.
    premultiplied_alpha: u32,
    /// Explicit padding so the struct stays 16 bytes for WGSL uniform layout.
    pad: u32,
}
22
/// Plain-old-data rectangle (origin + size) suitable for direct GPU upload.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    origin: [f32; 2],
    size: [f32; 2],
}
29
30impl From<Bounds<ScaledPixels>> for PodBounds {
31 fn from(bounds: Bounds<ScaledPixels>) -> Self {
32 Self {
33 origin: [bounds.origin.x.0, bounds.origin.y.0],
34 size: [bounds.size.width.0, bounds.size.height.0],
35 }
36 }
37}
38
/// Uniform parameters for the surface pipeline: where to draw and how to clip.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    /// Destination rectangle of the surface on screen.
    bounds: PodBounds,
    /// Clipping rectangle applied when compositing.
    content_mask: PodBounds,
}
45
/// Gamma-correction parameters for text rendering, uploaded once per frame.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    /// Ratios produced by `get_gamma_correction_ratios` (see RenderingParameters).
    gamma_ratios: [f32; 4],
    /// Contrast enhancement applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    /// Contrast enhancement applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    /// Padding to a 16-byte multiple for WGSL uniform layout.
    _pad: [f32; 2],
}
54
/// Instance data for compositing a rasterized path from the intermediate texture.
/// NOTE(review): #[repr(C)] suggests this is uploaded to the GPU verbatim — the
/// field layout presumably must match the shader; confirm against shaders.wgsl.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
60
/// Vertex format for the path-rasterization pass.
/// NOTE(review): #[repr(C)] suggests this is uploaded to the GPU verbatim — the
/// field layout presumably must match the shader; confirm against shaders.wgsl.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    /// Vertex position in scaled-pixel screen space.
    xy_position: Point<ScaledPixels>,
    /// Position in the curve's parametric (s, t) space.
    st_position: Point<f32>,
    /// Fill for the path this vertex belongs to.
    color: Background,
    /// Bounding box of the whole path.
    bounds: Bounds<ScaledPixels>,
}
69
/// Initial configuration for the window surface backing a renderer.
pub struct WgpuSurfaceConfig {
    /// Requested drawable size in device pixels (clamped to the device's maximum
    /// texture dimension at creation time).
    pub size: Size<DevicePixels>,
    /// Whether the window background should be composited with transparency.
    pub transparent: bool,
}
74
/// One render pipeline per primitive kind, built by `create_pipelines`.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    /// Rasterizes path geometry into the intermediate (possibly MSAA) texture.
    path_rasterization: wgpu::RenderPipeline,
    /// Composites rasterized paths from the intermediate texture onto the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    /// Subpixel text pipeline; only built when dual-source blending is supported.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    /// Built for parity with other backends, but surface batches are not drawn here.
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
87
/// Bind group layouts shared by all pipelines; see `create_bind_group_layouts`.
struct WgpuBindGroupLayouts {
    /// Group 0: GlobalParams uniform + GammaParams uniform.
    globals: wgpu::BindGroupLayout,
    /// Group 1 for untextured primitives: read-only instance storage buffer.
    instances: wgpu::BindGroupLayout,
    /// Group 1 for textured primitives: instance buffer + texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    /// Group 1 for the surface pipeline: params uniform + two textures + sampler.
    surfaces: wgpu::BindGroupLayout,
}
94
/// A wgpu-backed renderer for a single window surface.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    /// False after the surface is reported Lost/Outdated, until reconfigured.
    surface_configured: bool,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    /// Single uniform buffer packing GlobalParams, a second GlobalParams for
    /// path rasterization, and GammaParams at aligned offsets.
    globals_buffer: wgpu::Buffer,
    /// Byte offset of the path-rasterization GlobalParams within `globals_buffer`.
    path_globals_offset: u64,
    /// Byte offset of GammaParams within `globals_buffer`.
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    /// Storage buffer holding per-primitive instance data for the current frame.
    instance_buffer: wgpu::Buffer,
    /// Current size of `instance_buffer`; grown (up to a cap) when a frame overflows.
    instance_buffer_capacity: u64,
    storage_buffer_alignment: u64,
    // Intermediate targets for path rendering; created lazily in
    // ensure_intermediate_textures() and invalidated on resize.
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    /// Whether the adapter supports dual-source blending (enables subpixel text).
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    /// Alpha mode negotiated for transparent windows.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    /// Alpha mode negotiated for opaque windows.
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    /// Most recent uncaptured device error, recorded by the error callback and
    /// drained at the start of each draw().
    last_error: Arc<Mutex<Option<String>>>,
    /// Consecutive frames that reported a GPU error; draw() panics past 20.
    failed_frame_count: u32,
}
126
127impl WgpuRenderer {
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// Reuses the instance of an existing `gpu_context` when one is provided
    /// (validating that it is compatible with the new surface); otherwise a new
    /// context is created and stored back into `gpu_context` for reuse by
    /// subsequent windows.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    pub fn new<W: HasWindowHandle + HasDisplayHandle>(
        gpu_context: &mut Option<WgpuContext>,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self> {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
        let display_handle = window
            .display_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            raw_display_handle: display_handle.as_raw(),
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        let instance = gpu_context
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(WgpuContext::instance);

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        let context = match gpu_context {
            Some(context) => {
                // An existing context must be able to present to this new surface.
                context.check_compatible_with_surface(&surface)?;
                context
            }
            // First window: build a context for this instance/surface and cache it.
            None => gpu_context.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        Self::new_with_surface(context, surface, config)
    }
178
    /// Builds renderer state for an already-created surface: negotiates format and
    /// alpha modes, configures the surface, and creates pipelines, buffers, and
    /// bind groups.
    fn new_with_surface(
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer common non-sRGB 8-bit formats, then any non-sRGB format the
        // surface offers, then whatever format is listed first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first alpha mode from `preferences` that the surface supports,
        // falling back to the surface's first advertised mode.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Resolve both modes up front so update_transparency() can switch between
        // them later without re-querying surface capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to the device's limits; oversized surfaces
        // would fail to configure.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // The globals buffer packs three uniform ranges — GlobalParams for normal
        // drawing, a second GlobalParams for path rasterization, and GammaParams —
        // each aligned to min_uniform_buffer_offset_alignment.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Per-primitive instance data lives in one storage buffer, grown on
        // overflow in draw().
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Both bind groups share the same gamma range; they differ only in which
        // GlobalParams slice binding 0 points at.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured device errors; draw() drains this each frame and
        // panics after too many consecutive failures.
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        Ok(Self {
            device,
            queue,
            surface,
            surface_config,
            surface_configured: true,
            pipelines,
            bind_group_layouts,
            atlas,
            atlas_sampler,
            globals_buffer,
            path_globals_offset,
            gamma_offset,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            storage_buffer_alignment,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
        })
    }
396
397 fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
398 let globals =
399 device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
400 label: Some("globals_layout"),
401 entries: &[
402 wgpu::BindGroupLayoutEntry {
403 binding: 0,
404 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
405 ty: wgpu::BindingType::Buffer {
406 ty: wgpu::BufferBindingType::Uniform,
407 has_dynamic_offset: false,
408 min_binding_size: NonZeroU64::new(
409 std::mem::size_of::<GlobalParams>() as u64
410 ),
411 },
412 count: None,
413 },
414 wgpu::BindGroupLayoutEntry {
415 binding: 1,
416 visibility: wgpu::ShaderStages::FRAGMENT,
417 ty: wgpu::BindingType::Buffer {
418 ty: wgpu::BufferBindingType::Uniform,
419 has_dynamic_offset: false,
420 min_binding_size: NonZeroU64::new(
421 std::mem::size_of::<GammaParams>() as u64
422 ),
423 },
424 count: None,
425 },
426 ],
427 });
428
429 let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
430 binding,
431 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
432 ty: wgpu::BindingType::Buffer {
433 ty: wgpu::BufferBindingType::Storage { read_only: true },
434 has_dynamic_offset: false,
435 min_binding_size: None,
436 },
437 count: None,
438 };
439
440 let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
441 label: Some("instances_layout"),
442 entries: &[storage_buffer_entry(0)],
443 });
444
445 let instances_with_texture =
446 device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
447 label: Some("instances_with_texture_layout"),
448 entries: &[
449 storage_buffer_entry(0),
450 wgpu::BindGroupLayoutEntry {
451 binding: 1,
452 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
453 ty: wgpu::BindingType::Texture {
454 sample_type: wgpu::TextureSampleType::Float { filterable: true },
455 view_dimension: wgpu::TextureViewDimension::D2,
456 multisampled: false,
457 },
458 count: None,
459 },
460 wgpu::BindGroupLayoutEntry {
461 binding: 2,
462 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
463 ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
464 count: None,
465 },
466 ],
467 });
468
469 let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
470 label: Some("surfaces_layout"),
471 entries: &[
472 wgpu::BindGroupLayoutEntry {
473 binding: 0,
474 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
475 ty: wgpu::BindingType::Buffer {
476 ty: wgpu::BufferBindingType::Uniform,
477 has_dynamic_offset: false,
478 min_binding_size: NonZeroU64::new(
479 std::mem::size_of::<SurfaceParams>() as u64
480 ),
481 },
482 count: None,
483 },
484 wgpu::BindGroupLayoutEntry {
485 binding: 1,
486 visibility: wgpu::ShaderStages::FRAGMENT,
487 ty: wgpu::BindingType::Texture {
488 sample_type: wgpu::TextureSampleType::Float { filterable: true },
489 view_dimension: wgpu::TextureViewDimension::D2,
490 multisampled: false,
491 },
492 count: None,
493 },
494 wgpu::BindGroupLayoutEntry {
495 binding: 2,
496 visibility: wgpu::ShaderStages::FRAGMENT,
497 ty: wgpu::BindingType::Texture {
498 sample_type: wgpu::TextureSampleType::Float { filterable: true },
499 view_dimension: wgpu::TextureViewDimension::D2,
500 multisampled: false,
501 },
502 count: None,
503 },
504 wgpu::BindGroupLayoutEntry {
505 binding: 3,
506 visibility: wgpu::ShaderStages::FRAGMENT,
507 ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
508 count: None,
509 },
510 ],
511 });
512
513 WgpuBindGroupLayouts {
514 globals,
515 instances,
516 instances_with_texture,
517 surfaces,
518 }
519 }
520
    /// Compiles the WGSL shader module and builds every render pipeline.
    ///
    /// `alpha_mode` selects the blend state for the standard pipelines
    /// (premultiplied vs. straight alpha), `path_sample_count` sets the MSAA
    /// sample count for path rasterization, and `dual_source_blending` gates
    /// the optional subpixel-text pipeline.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        // All entry points live in a single shader module, compiled at build time.
        let shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });

        // The default blend must match how the compositor interprets the surface's
        // alpha channel.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Shared pipeline builder: all pipelines are vertex-buffer-less (instance
        // data comes from storage buffers) and differ only in entry points,
        // layouts, topology, color targets, and sample count.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Renders into the intermediate texture (not the surface), with MSAA
        // when path_sample_count > 1. Always premultiplied, regardless of the
        // surface's alpha mode.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
        );

        // Compositing blend for already-premultiplied path output: color uses
        // One/OneMinusSrcAlpha; alpha accumulates additively.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Subpixel text needs dual-source blending (Src1 factors) for per-channel
        // coverage; the pipeline writes only the color channels, leaving the
        // destination alpha untouched.
        let subpixel_sprites = if dual_source_blending {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Built but currently unused on this backend (surface batches are no-ops
        // in draw()).
        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
750
751 fn create_path_intermediate(
752 device: &wgpu::Device,
753 format: wgpu::TextureFormat,
754 width: u32,
755 height: u32,
756 ) -> (wgpu::Texture, wgpu::TextureView) {
757 let texture = device.create_texture(&wgpu::TextureDescriptor {
758 label: Some("path_intermediate"),
759 size: wgpu::Extent3d {
760 width: width.max(1),
761 height: height.max(1),
762 depth_or_array_layers: 1,
763 },
764 mip_level_count: 1,
765 sample_count: 1,
766 dimension: wgpu::TextureDimension::D2,
767 format,
768 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
769 view_formats: &[],
770 });
771 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
772 (texture, view)
773 }
774
775 fn create_msaa_if_needed(
776 device: &wgpu::Device,
777 format: wgpu::TextureFormat,
778 width: u32,
779 height: u32,
780 sample_count: u32,
781 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
782 if sample_count <= 1 {
783 return None;
784 }
785 let texture = device.create_texture(&wgpu::TextureDescriptor {
786 label: Some("path_msaa"),
787 size: wgpu::Extent3d {
788 width: width.max(1),
789 height: height.max(1),
790 depth_or_array_layers: 1,
791 },
792 mip_level_count: 1,
793 sample_count,
794 dimension: wgpu::TextureDimension::D2,
795 format,
796 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
797 view_formats: &[],
798 });
799 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
800 Some((texture, view))
801 }
802
    /// Resizes the surface to `size` (clamped to the device's maximum texture
    /// dimension), destroying and invalidating the path intermediate textures so
    /// they are recreated lazily at the new size. No-op if the size is unchanged.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                    Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = self.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = self.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = self.path_msaa_texture {
                texture.destroy();
            }

            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            // Only reconfigure a healthy surface; a lost/outdated surface will be
            // reconfigured once draw() detects it.
            if self.surface_configured {
                self.surface.configure(&self.device, &self.surface_config);
            }

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            self.path_intermediate_texture = None;
            self.path_intermediate_view = None;
            self.path_msaa_texture = None;
            self.path_msaa_view = None;
        }
    }
850
851 fn ensure_intermediate_textures(&mut self) {
852 if self.path_intermediate_texture.is_some() {
853 return;
854 }
855
856 let (path_intermediate_texture, path_intermediate_view) = {
857 let (t, v) = Self::create_path_intermediate(
858 &self.device,
859 self.surface_config.format,
860 self.surface_config.width,
861 self.surface_config.height,
862 );
863 (Some(t), Some(v))
864 };
865 self.path_intermediate_texture = path_intermediate_texture;
866 self.path_intermediate_view = path_intermediate_view;
867
868 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
869 &self.device,
870 self.surface_config.format,
871 self.surface_config.width,
872 self.surface_config.height,
873 self.rendering_params.path_sample_count,
874 )
875 .map(|(t, v)| (Some(t), Some(v)))
876 .unwrap_or((None, None));
877 self.path_msaa_texture = path_msaa_texture;
878 self.path_msaa_view = path_msaa_view;
879 }
880
881 pub fn update_transparency(&mut self, transparent: bool) {
882 let new_alpha_mode = if transparent {
883 self.transparent_alpha_mode
884 } else {
885 self.opaque_alpha_mode
886 };
887
888 if new_alpha_mode != self.surface_config.alpha_mode {
889 self.surface_config.alpha_mode = new_alpha_mode;
890 if self.surface_configured {
891 self.surface.configure(&self.device, &self.surface_config);
892 }
893 self.pipelines = Self::create_pipelines(
894 &self.device,
895 &self.bind_group_layouts,
896 self.surface_config.format,
897 self.surface_config.alpha_mode,
898 self.rendering_params.path_sample_count,
899 self.dual_source_blending,
900 );
901 }
902 }
903
904 #[allow(dead_code)]
905 pub fn viewport_size(&self) -> Size<DevicePixels> {
906 Size {
907 width: DevicePixels(self.surface_config.width as i32),
908 height: DevicePixels(self.surface_config.height as i32),
909 }
910 }
911
    /// Returns the sprite atlas shared with the text/image systems.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
915
916 pub fn gpu_specs(&self) -> GpuSpecs {
917 GpuSpecs {
918 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
919 device_name: self.adapter_info.name.clone(),
920 driver_name: self.adapter_info.driver.clone(),
921 driver_info: self.adapter_info.driver_info.clone(),
922 }
923 }
924
    /// Maximum 2D texture dimension supported by the device (cached from limits).
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
928
    /// Renders one `Scene` to the surface and presents it.
    ///
    /// Checks for GPU errors from the previous frame, acquires the surface
    /// texture, uploads per-frame uniforms, then encodes every primitive batch
    /// into a render pass. If the instance buffer overflows mid-frame, the whole
    /// frame is re-encoded with a larger buffer (up to a 256 MiB cap).
    pub fn draw(&mut self, scene: &Scene) {
        // Drain any uncaptured error recorded since the last frame; panic only
        // after a sustained run of failing frames.
        let last_error = self.last_error.lock().unwrap().take();
        if let Some(error) = last_error {
            self.failed_frame_count += 1;
            // NOTE(review): failed_frame_count can reach 21 before the panic below
            // fires, so "failure 21 of 20" can be logged — confirm intended threshold.
            log::error!(
                "GPU error during frame (failure {} of 20): {error}",
                self.failed_frame_count
            );
            if self.failed_frame_count > 20 {
                panic!("Too many consecutive GPU errors. Last error: {error}");
            }
        } else {
            self.failed_frame_count = 0;
        }

        self.atlas.before_frame();

        let frame = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            // A lost/outdated surface needs reconfiguration; skip this frame and
            // let the next resize/configure restore it.
            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                self.surface_configured = false;
                return;
            }
            Err(e) => {
                log::error!("Failed to acquire surface texture: {e}");
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // Path rasterization always targets the intermediate texture with
        // straight alpha, regardless of the surface's compositing mode.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload the three uniform ranges at their pre-computed aligned offsets.
        self.queue
            .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
        self.queue.write_buffer(
            &self.globals_buffer,
            self.path_globals_offset,
            bytemuck::bytes_of(&path_globals),
        );
        self.queue.write_buffer(
            &self.globals_buffer,
            self.gamma_offset,
            bytemuck::bytes_of(&gamma_params),
        );

        // Encode the frame; on instance-buffer overflow, grow the buffer and
        // re-encode from scratch.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder = self
                .device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("main_encoder"),
                });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                for batch in scene.batches() {
                    // Each draw_* returns false when the instance buffer is full.
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths are rasterized in their own pass into the
                            // intermediate texture, so the main pass must be
                            // suspended (ended) and then resumed with Load so the
                            // frame contents so far are preserved.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Discard the partially-encoded frame, grow the buffer, retry.
                drop(encoder);
                if self.instance_buffer_capacity >= 256 * 1024 * 1024 {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    // Present the (cleared) frame anyway so the swapchain advances.
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.queue.submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1138
1139 fn draw_quads(
1140 &self,
1141 quads: &[Quad],
1142 instance_offset: &mut u64,
1143 pass: &mut wgpu::RenderPass<'_>,
1144 ) -> bool {
1145 let data = unsafe { Self::instance_bytes(quads) };
1146 self.draw_instances(
1147 data,
1148 quads.len() as u32,
1149 &self.pipelines.quads,
1150 instance_offset,
1151 pass,
1152 )
1153 }
1154
1155 fn draw_shadows(
1156 &self,
1157 shadows: &[Shadow],
1158 instance_offset: &mut u64,
1159 pass: &mut wgpu::RenderPass<'_>,
1160 ) -> bool {
1161 let data = unsafe { Self::instance_bytes(shadows) };
1162 self.draw_instances(
1163 data,
1164 shadows.len() as u32,
1165 &self.pipelines.shadows,
1166 instance_offset,
1167 pass,
1168 )
1169 }
1170
1171 fn draw_underlines(
1172 &self,
1173 underlines: &[Underline],
1174 instance_offset: &mut u64,
1175 pass: &mut wgpu::RenderPass<'_>,
1176 ) -> bool {
1177 let data = unsafe { Self::instance_bytes(underlines) };
1178 self.draw_instances(
1179 data,
1180 underlines.len() as u32,
1181 &self.pipelines.underlines,
1182 instance_offset,
1183 pass,
1184 )
1185 }
1186
1187 fn draw_monochrome_sprites(
1188 &self,
1189 sprites: &[MonochromeSprite],
1190 texture_id: AtlasTextureId,
1191 instance_offset: &mut u64,
1192 pass: &mut wgpu::RenderPass<'_>,
1193 ) -> bool {
1194 let tex_info = self.atlas.get_texture_info(texture_id);
1195 let data = unsafe { Self::instance_bytes(sprites) };
1196 self.draw_instances_with_texture(
1197 data,
1198 sprites.len() as u32,
1199 &tex_info.view,
1200 &self.pipelines.mono_sprites,
1201 instance_offset,
1202 pass,
1203 )
1204 }
1205
1206 fn draw_subpixel_sprites(
1207 &self,
1208 sprites: &[SubpixelSprite],
1209 texture_id: AtlasTextureId,
1210 instance_offset: &mut u64,
1211 pass: &mut wgpu::RenderPass<'_>,
1212 ) -> bool {
1213 let tex_info = self.atlas.get_texture_info(texture_id);
1214 let data = unsafe { Self::instance_bytes(sprites) };
1215 let pipeline = self
1216 .pipelines
1217 .subpixel_sprites
1218 .as_ref()
1219 .unwrap_or(&self.pipelines.mono_sprites);
1220 self.draw_instances_with_texture(
1221 data,
1222 sprites.len() as u32,
1223 &tex_info.view,
1224 pipeline,
1225 instance_offset,
1226 pass,
1227 )
1228 }
1229
1230 fn draw_polychrome_sprites(
1231 &self,
1232 sprites: &[PolychromeSprite],
1233 texture_id: AtlasTextureId,
1234 instance_offset: &mut u64,
1235 pass: &mut wgpu::RenderPass<'_>,
1236 ) -> bool {
1237 let tex_info = self.atlas.get_texture_info(texture_id);
1238 let data = unsafe { Self::instance_bytes(sprites) };
1239 self.draw_instances_with_texture(
1240 data,
1241 sprites.len() as u32,
1242 &tex_info.view,
1243 &self.pipelines.poly_sprites,
1244 instance_offset,
1245 pass,
1246 )
1247 }
1248
1249 fn draw_instances(
1250 &self,
1251 data: &[u8],
1252 instance_count: u32,
1253 pipeline: &wgpu::RenderPipeline,
1254 instance_offset: &mut u64,
1255 pass: &mut wgpu::RenderPass<'_>,
1256 ) -> bool {
1257 if instance_count == 0 {
1258 return true;
1259 }
1260 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1261 return false;
1262 };
1263 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1264 label: None,
1265 layout: &self.bind_group_layouts.instances,
1266 entries: &[wgpu::BindGroupEntry {
1267 binding: 0,
1268 resource: self.instance_binding(offset, size),
1269 }],
1270 });
1271 pass.set_pipeline(pipeline);
1272 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1273 pass.set_bind_group(1, &bind_group, &[]);
1274 pass.draw(0..4, 0..instance_count);
1275 true
1276 }
1277
1278 fn draw_instances_with_texture(
1279 &self,
1280 data: &[u8],
1281 instance_count: u32,
1282 texture_view: &wgpu::TextureView,
1283 pipeline: &wgpu::RenderPipeline,
1284 instance_offset: &mut u64,
1285 pass: &mut wgpu::RenderPass<'_>,
1286 ) -> bool {
1287 if instance_count == 0 {
1288 return true;
1289 }
1290 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1291 return false;
1292 };
1293 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1294 label: None,
1295 layout: &self.bind_group_layouts.instances_with_texture,
1296 entries: &[
1297 wgpu::BindGroupEntry {
1298 binding: 0,
1299 resource: self.instance_binding(offset, size),
1300 },
1301 wgpu::BindGroupEntry {
1302 binding: 1,
1303 resource: wgpu::BindingResource::TextureView(texture_view),
1304 },
1305 wgpu::BindGroupEntry {
1306 binding: 2,
1307 resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1308 },
1309 ],
1310 });
1311 pass.set_pipeline(pipeline);
1312 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1313 pass.set_bind_group(1, &bind_group, &[]);
1314 pass.draw(0..4, 0..instance_count);
1315 true
1316 }
1317
    /// Reinterpret a slice of instance structs as raw bytes for upload into
    /// the GPU instance buffer.
    ///
    /// # Safety
    ///
    /// `T` must be safely viewable as plain bytes — callers in this file only
    /// pass GPU instance structs intended for verbatim upload. NOTE(review):
    /// this assumes `T` contains no uninitialized padding bytes; confirm for
    /// each instance type (e.g. via `bytemuck::Pod`), since reading padding
    /// as `u8` is undefined behavior.
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        // SAFETY: pointer and length come from a valid slice borrow, and the
        // byte length is `size_of_val(instances)`, so the region is exactly
        // the slice's memory and lives as long as the returned borrow.
        unsafe {
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1326
1327 fn draw_paths_from_intermediate(
1328 &self,
1329 paths: &[Path<ScaledPixels>],
1330 instance_offset: &mut u64,
1331 pass: &mut wgpu::RenderPass<'_>,
1332 ) -> bool {
1333 let first_path = &paths[0];
1334 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1335 {
1336 paths
1337 .iter()
1338 .map(|p| PathSprite {
1339 bounds: p.clipped_bounds(),
1340 })
1341 .collect()
1342 } else {
1343 let mut bounds = first_path.clipped_bounds();
1344 for path in paths.iter().skip(1) {
1345 bounds = bounds.union(&path.clipped_bounds());
1346 }
1347 vec![PathSprite { bounds }]
1348 };
1349
1350 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1351 return true;
1352 };
1353
1354 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1355 self.draw_instances_with_texture(
1356 sprite_data,
1357 sprites.len() as u32,
1358 path_intermediate_view,
1359 &self.pipelines.paths,
1360 instance_offset,
1361 pass,
1362 )
1363 }
1364
1365 fn draw_paths_to_intermediate(
1366 &self,
1367 encoder: &mut wgpu::CommandEncoder,
1368 paths: &[Path<ScaledPixels>],
1369 instance_offset: &mut u64,
1370 ) -> bool {
1371 let mut vertices = Vec::new();
1372 for path in paths {
1373 let bounds = path.clipped_bounds();
1374 vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
1375 xy_position: v.xy_position,
1376 st_position: v.st_position,
1377 color: path.color,
1378 bounds,
1379 }));
1380 }
1381
1382 if vertices.is_empty() {
1383 return true;
1384 }
1385
1386 let vertex_data = unsafe { Self::instance_bytes(&vertices) };
1387 let Some((vertex_offset, vertex_size)) =
1388 self.write_to_instance_buffer(instance_offset, vertex_data)
1389 else {
1390 return false;
1391 };
1392
1393 let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1394 label: Some("path_rasterization_bind_group"),
1395 layout: &self.bind_group_layouts.instances,
1396 entries: &[wgpu::BindGroupEntry {
1397 binding: 0,
1398 resource: self.instance_binding(vertex_offset, vertex_size),
1399 }],
1400 });
1401
1402 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1403 return true;
1404 };
1405
1406 let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
1407 (msaa_view, Some(path_intermediate_view))
1408 } else {
1409 (path_intermediate_view, None)
1410 };
1411
1412 {
1413 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1414 label: Some("path_rasterization_pass"),
1415 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1416 view: target_view,
1417 resolve_target,
1418 ops: wgpu::Operations {
1419 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1420 store: wgpu::StoreOp::Store,
1421 },
1422 depth_slice: None,
1423 })],
1424 depth_stencil_attachment: None,
1425 ..Default::default()
1426 });
1427
1428 pass.set_pipeline(&self.pipelines.path_rasterization);
1429 pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
1430 pass.set_bind_group(1, &data_bind_group, &[]);
1431 pass.draw(0..vertices.len() as u32, 0..1);
1432 }
1433
1434 true
1435 }
1436
1437 fn grow_instance_buffer(&mut self) {
1438 let new_capacity = self.instance_buffer_capacity * 2;
1439 log::info!("increased instance buffer size to {}", new_capacity);
1440 self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1441 label: Some("instance_buffer"),
1442 size: new_capacity,
1443 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1444 mapped_at_creation: false,
1445 });
1446 self.instance_buffer_capacity = new_capacity;
1447 }
1448
1449 fn write_to_instance_buffer(
1450 &self,
1451 instance_offset: &mut u64,
1452 data: &[u8],
1453 ) -> Option<(u64, NonZeroU64)> {
1454 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1455 let size = (data.len() as u64).max(16);
1456 if offset + size > self.instance_buffer_capacity {
1457 return None;
1458 }
1459 self.queue.write_buffer(&self.instance_buffer, offset, data);
1460 *instance_offset = offset + size;
1461 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1462 }
1463
1464 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1465 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1466 buffer: &self.instance_buffer,
1467 offset,
1468 size: Some(size),
1469 })
1470 }
1471
    /// Release renderer resources. All wgpu objects held by this struct are
    /// reference-counted and freed on drop, so no explicit teardown is
    /// needed; the method presumably exists for interface parity with other
    /// renderer backends — confirm against callers.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1475}
1476
// Rendering tunables derived from the adapter's capabilities and optional
// environment-variable overrides (see `RenderingParameters::new`).
struct RenderingParameters {
    // MSAA sample count used for path rasterization (4, 2, or 1).
    path_sample_count: u32,
    // Gamma-correction ratios computed from the configured display gamma.
    gamma_ratios: [f32; 4],
    // Extra contrast applied to grayscale-antialiased text (>= 0).
    grayscale_enhanced_contrast: f32,
    // Extra contrast applied to subpixel-antialiased text (>= 0).
    subpixel_enhanced_contrast: f32,
}
1483
1484impl RenderingParameters {
1485 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1486 use std::env;
1487
1488 let format_features = adapter.get_texture_format_features(surface_format);
1489 let path_sample_count = [4, 2, 1]
1490 .into_iter()
1491 .find(|&n| format_features.flags.sample_count_supported(n))
1492 .unwrap_or(1);
1493
1494 let gamma = env::var("ZED_FONTS_GAMMA")
1495 .ok()
1496 .and_then(|v| v.parse().ok())
1497 .unwrap_or(1.8_f32)
1498 .clamp(1.0, 2.2);
1499 let gamma_ratios = get_gamma_correction_ratios(gamma);
1500
1501 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1502 .ok()
1503 .and_then(|v| v.parse().ok())
1504 .unwrap_or(1.0_f32)
1505 .max(0.0);
1506
1507 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1508 .ok()
1509 .and_then(|v| v.parse().ok())
1510 .unwrap_or(0.5_f32)
1511 .max(0.0);
1512
1513 Self {
1514 path_sample_count,
1515 gamma_ratios,
1516 grayscale_enhanced_contrast,
1517 subpixel_enhanced_contrast,
1518 }
1519 }
1520}