1#[cfg(not(target_family = "wasm"))]
2use crate::CompositorGpuHint;
3use crate::{WgpuAtlas, WgpuContext};
4use bytemuck::{Pod, Zeroable};
5use gpui::{
6 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
7 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
8 Underline, get_gamma_correction_ratios,
9};
10use log::warn;
11use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
12use std::num::NonZeroU64;
13use std::sync::{Arc, Mutex};
14
/// Per-frame uniform parameters shared by the primitive shaders.
///
/// `#[repr(C)]` plus `Pod`/`Zeroable` let the struct be uploaded verbatim with
/// `bytemuck` (see the `write_buffer` calls in `draw`). The field layout must
/// match the corresponding WGSL struct in `shaders.wgsl` — TODO confirm.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    /// Surface size in physical pixels: `[width, height]`.
    viewport_size: [f32; 2],
    /// 1 when the surface alpha mode is `PreMultiplied`, 0 otherwise.
    premultiplied_alpha: u32,
    /// Explicit padding so the CPU-side size matches the GPU-side layout.
    pad: u32,
}
22
/// Plain-old-data rectangle (origin + size) with raw `f32` components,
/// suitable for direct GPU upload via `bytemuck`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    /// `[x, y]` of the top-left corner, in scaled pixels.
    origin: [f32; 2],
    /// `[width, height]`, in scaled pixels.
    size: [f32; 2],
}
29
30impl From<Bounds<ScaledPixels>> for PodBounds {
31 fn from(bounds: Bounds<ScaledPixels>) -> Self {
32 Self {
33 origin: [bounds.origin.x.0, bounds.origin.y.0],
34 size: [bounds.size.width.0, bounds.size.height.0],
35 }
36 }
37}
38
/// Uniform parameters for the (currently unused on this backend) `surfaces`
/// pipeline: where to draw the surface and the clip rectangle to apply.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    /// Destination rectangle of the surface on screen.
    bounds: PodBounds,
    /// Clipping rectangle; content outside this mask is not drawn.
    content_mask: PodBounds,
}
45
/// Text gamma-correction parameters uploaded at `gamma_offset` in the globals
/// buffer and bound at binding 1 of the globals bind group (fragment-only).
/// Values come from `RenderingParameters` (see `draw`).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    /// Ratios produced by `get_gamma_correction_ratios` — TODO confirm exact
    /// semantics against the gpui text-rendering docs.
    gamma_ratios: [f32; 4],
    /// Contrast enhancement applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    /// Contrast enhancement applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    /// Padding to keep the CPU-side size in sync with the GPU-side layout.
    _pad: [f32; 2],
}
54
/// Per-path instance data for the composite (`paths`) pass: the screen-space
/// rectangle that the rasterized path texture is sampled into.
/// NOTE(review): `#[repr(C)]` suggests this is uploaded raw to the GPU via
/// the unsafe `instance_bytes` path — confirm against the draw code.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
60
/// Vertex layout for the path-rasterization pass that fills the offscreen
/// intermediate texture.
/// NOTE(review): `#[repr(C)]` implies raw GPU upload; field order must match
/// the WGSL vertex input — confirm against `shaders.wgsl`.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    /// Vertex position in scaled screen pixels.
    xy_position: Point<ScaledPixels>,
    /// Parametric coordinate used for curve coverage evaluation.
    st_position: Point<f32>,
    /// Fill color/background of the path this vertex belongs to.
    color: Background,
    /// Bounds of the whole path, carried per-vertex.
    bounds: Bounds<ScaledPixels>,
}
69
/// Initial configuration requested for a window's render surface.
pub struct WgpuSurfaceConfig {
    /// Requested drawable size in device pixels (clamped to the adapter's
    /// maximum texture dimension during renderer construction).
    pub size: Size<DevicePixels>,
    /// Whether the window wants a transparent (premultiplied-alpha) surface.
    pub transparent: bool,
}
74
/// One render pipeline per primitive kind drawn by the renderer.
/// All are rebuilt together when the surface alpha mode changes
/// (see `update_transparency`), since the blend state is baked in.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    /// Fills the offscreen intermediate texture with path coverage.
    path_rasterization: wgpu::RenderPipeline,
    /// Composites the rasterized paths from the intermediate texture.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    /// Present only when the device supports dual-source blending.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    /// Created but not used on this backend (surfaces are macOS-only video).
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
87
/// Bind group layouts shared across all pipelines; created once in
/// `create_bind_group_layouts`.
struct WgpuBindGroupLayouts {
    /// Group 0: `GlobalParams` uniform + `GammaParams` uniform.
    globals: wgpu::BindGroupLayout,
    /// Group 1 (texture-less primitives): read-only instance storage buffer.
    instances: wgpu::BindGroupLayout,
    /// Group 1 (textured primitives): instance buffer + texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    /// Group 1 for the surfaces pipeline: uniforms + two planes + sampler.
    surfaces: wgpu::BindGroupLayout,
}
94
/// wgpu-based renderer for a single window: owns the presentation surface,
/// the per-primitive pipelines, and the buffers shared by all draw calls.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    /// Shared sprite atlas for glyph/image uploads.
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    /// Single uniform buffer holding, in order: `GlobalParams`, the path-pass
    /// `GlobalParams`, and `GammaParams`, each placed at a multiple of the
    /// device's minimum uniform-buffer offset alignment.
    globals_buffer: wgpu::Buffer,
    /// Byte offset of the path-pass `GlobalParams` within `globals_buffer`.
    path_globals_offset: u64,
    /// Byte offset of `GammaParams` within `globals_buffer`.
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    /// Same layout as `globals_bind_group` but binding 0 points at the
    /// path-pass globals (which force `premultiplied_alpha = 0`).
    path_globals_bind_group: wgpu::BindGroup,
    /// Storage buffer holding a frame's per-primitive instance data; grown
    /// (and the frame re-encoded) when it overflows.
    instance_buffer: wgpu::Buffer,
    instance_buffer_capacity: u64,
    storage_buffer_alignment: u64,
    /// Offscreen target paths are rasterized into; created lazily on first
    /// draw and invalidated on resize.
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    /// Multisampled render target for path anti-aliasing, when enabled.
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    /// Whether dual-source blending is available (gates subpixel text).
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    /// Alpha mode to use while the window is transparent.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    /// Alpha mode to use while the window is opaque.
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
    /// Most recent uncaptured device error, recorded by the error callback
    /// installed in `new_with_surface` and drained at the start of `draw`.
    last_error: Arc<Mutex<Option<String>>>,
    /// Count of consecutive frames that observed a GPU error; `draw` panics
    /// once this exceeds a threshold.
    failed_frame_count: u32,
}
125
126impl WgpuRenderer {
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// Reuses the shared `gpu_context` when one already exists (verifying it
    /// can present to this surface); otherwise creates one and stores it back
    /// into `gpu_context` so later windows can share the device/queue.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    pub fn new<W: HasWindowHandle + HasDisplayHandle>(
        gpu_context: &mut Option<WgpuContext>,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self> {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
        let display_handle = window
            .display_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            raw_display_handle: display_handle.as_raw(),
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        let instance = gpu_context
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(WgpuContext::instance);

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        let context = match gpu_context {
            Some(context) => {
                // An existing context may have been built for a different
                // adapter; confirm it can actually present to this surface.
                context.check_compatible_with_surface(&surface)?;
                context
            }
            // First window: build the shared context against this surface so
            // the chosen adapter is guaranteed to support presenting to it.
            None => gpu_context.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        Self::new_with_surface(context, surface, config)
    }
177
    /// Builds the renderer for an already-created surface: negotiates the
    /// texture format and alpha modes, configures the surface, compiles all
    /// pipelines, and allocates the shared uniform/instance buffers.
    fn new_with_surface(
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer 8-bit non-sRGB formats; fall back to any non-sRGB format the
        // surface offers, then to whatever the capability list reports first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first preferred alpha mode the surface supports, falling
        // back to the surface's first advertised mode.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Resolve both modes up front so update_transparency can switch
        // between them later without re-querying surface capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Surfaces cannot exceed the device's maximum 2D texture dimension.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Lay out three uniform structs in one buffer:
        // [GlobalParams | path GlobalParams | GammaParams], each starting at
        // a multiple of the device's minimum uniform-buffer offset alignment.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        // 2 MiB initial capacity; grown on demand when a frame overflows it.
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // The two bind groups share the buffer and the gamma binding; they
        // differ only in which GlobalParams slice binding 0 points at.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        // Record uncaptured device errors so draw() can count failing frames
        // instead of silently continuing (wgpu reports them asynchronously).
        let last_error: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let last_error_clone = Arc::clone(&last_error);
        device.on_uncaptured_error(Arc::new(move |error| {
            let mut guard = last_error_clone.lock().unwrap();
            *guard = Some(error.to_string());
        }));

        Ok(Self {
            device,
            queue,
            surface,
            surface_config,
            pipelines,
            bind_group_layouts,
            atlas,
            atlas_sampler,
            globals_buffer,
            path_globals_offset,
            gamma_offset,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            storage_buffer_alignment,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
            last_error,
            failed_frame_count: 0,
        })
    }
394
    /// Creates the bind group layouts shared by all pipelines.
    ///
    /// Binding indices and visibility must stay in sync with the WGSL in
    /// `shaders.wgsl` — TODO confirm when editing either side.
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        // Group 0 for every pipeline: GlobalParams at binding 0 (both stages)
        // and GammaParams at binding 1 (fragment only).
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer carrying per-primitive instance data;
        // min_binding_size is left open since instance sizes vary by batch.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        // Group 1 for primitives that don't sample a texture (quads, shadows, ...).
        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // Group 1 for textured primitives: instances + atlas texture + sampler.
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // Group 1 for the surfaces pipeline: SurfaceParams uniform, two
        // texture planes, and a sampler (unused on this backend).
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
518
    /// Compiles the shared WGSL module and builds one render pipeline per
    /// primitive kind.
    ///
    /// The blend state baked into each pipeline depends on `alpha_mode`, so
    /// callers must rebuild all pipelines whenever the alpha mode changes
    /// (see `update_transparency`). `dual_source_blending` gates the
    /// subpixel-text pipeline, which needs `Src1`/`OneMinusSrc1` factors.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });

        // On premultiplied surfaces the shaders output premultiplied color,
        // so the default blend must match.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // All pipelines share the same shape: no vertex buffers (vertices are
        // generated from the instance storage buffer), no depth/stencil, and
        // two bind groups (globals + per-primitive data).
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Renders into the offscreen intermediate texture (optionally MSAA),
        // always premultiplied regardless of the surface alpha mode.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
        );

        // Composite blend for sampling the intermediate path texture onto the
        // frame: color is premultiplied-over; alpha accumulates additively.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Subpixel text requires per-channel blend factors (dual-source
        // blending); skip the pipeline entirely when unsupported.
        let subpixel_sprites = if dual_source_blending {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    // Color only: subpixel coverage must not touch dst alpha.
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
748
749 fn create_path_intermediate(
750 device: &wgpu::Device,
751 format: wgpu::TextureFormat,
752 width: u32,
753 height: u32,
754 ) -> (wgpu::Texture, wgpu::TextureView) {
755 let texture = device.create_texture(&wgpu::TextureDescriptor {
756 label: Some("path_intermediate"),
757 size: wgpu::Extent3d {
758 width: width.max(1),
759 height: height.max(1),
760 depth_or_array_layers: 1,
761 },
762 mip_level_count: 1,
763 sample_count: 1,
764 dimension: wgpu::TextureDimension::D2,
765 format,
766 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
767 view_formats: &[],
768 });
769 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
770 (texture, view)
771 }
772
773 fn create_msaa_if_needed(
774 device: &wgpu::Device,
775 format: wgpu::TextureFormat,
776 width: u32,
777 height: u32,
778 sample_count: u32,
779 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
780 if sample_count <= 1 {
781 return None;
782 }
783 let texture = device.create_texture(&wgpu::TextureDescriptor {
784 label: Some("path_msaa"),
785 size: wgpu::Extent3d {
786 width: width.max(1),
787 height: height.max(1),
788 depth_or_array_layers: 1,
789 },
790 mip_level_count: 1,
791 sample_count,
792 dimension: wgpu::TextureDimension::D2,
793 format,
794 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
795 view_formats: &[],
796 });
797 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
798 Some((texture, view))
799 }
800
    /// Resizes the presentation surface to `size` (clamped to the device's
    /// maximum texture dimension) and invalidates the offscreen path
    /// textures so they are recreated at the new size on the next draw.
    /// No-op when the size is unchanged.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                    Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = self.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = self.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = self.path_msaa_texture {
                texture.destroy();
            }

            // Surface dimensions must never be zero.
            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            self.surface.configure(&self.device, &self.surface_config);

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            self.path_intermediate_texture = None;
            self.path_intermediate_view = None;
            self.path_msaa_texture = None;
            self.path_msaa_view = None;
        }
    }
846
847 fn ensure_intermediate_textures(&mut self) {
848 if self.path_intermediate_texture.is_some() {
849 return;
850 }
851
852 let (path_intermediate_texture, path_intermediate_view) = {
853 let (t, v) = Self::create_path_intermediate(
854 &self.device,
855 self.surface_config.format,
856 self.surface_config.width,
857 self.surface_config.height,
858 );
859 (Some(t), Some(v))
860 };
861 self.path_intermediate_texture = path_intermediate_texture;
862 self.path_intermediate_view = path_intermediate_view;
863
864 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
865 &self.device,
866 self.surface_config.format,
867 self.surface_config.width,
868 self.surface_config.height,
869 self.rendering_params.path_sample_count,
870 )
871 .map(|(t, v)| (Some(t), Some(v)))
872 .unwrap_or((None, None));
873 self.path_msaa_texture = path_msaa_texture;
874 self.path_msaa_view = path_msaa_view;
875 }
876
877 pub fn update_transparency(&mut self, transparent: bool) {
878 let new_alpha_mode = if transparent {
879 self.transparent_alpha_mode
880 } else {
881 self.opaque_alpha_mode
882 };
883
884 if new_alpha_mode != self.surface_config.alpha_mode {
885 self.surface_config.alpha_mode = new_alpha_mode;
886 self.surface.configure(&self.device, &self.surface_config);
887 self.pipelines = Self::create_pipelines(
888 &self.device,
889 &self.bind_group_layouts,
890 self.surface_config.format,
891 self.surface_config.alpha_mode,
892 self.rendering_params.path_sample_count,
893 self.dual_source_blending,
894 );
895 }
896 }
897
898 #[allow(dead_code)]
899 pub fn viewport_size(&self) -> Size<DevicePixels> {
900 Size {
901 width: DevicePixels(self.surface_config.width as i32),
902 height: DevicePixels(self.surface_config.height as i32),
903 }
904 }
905
    /// Shared handle to the sprite atlas used for glyph and image uploads.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
909
910 pub fn gpu_specs(&self) -> GpuSpecs {
911 GpuSpecs {
912 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
913 device_name: self.adapter_info.name.clone(),
914 driver_name: self.adapter_info.driver.clone(),
915 driver_info: self.adapter_info.driver_info.clone(),
916 }
917 }
918
    /// Maximum 2D texture dimension supported by the device; surface sizes
    /// are clamped to this.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
922
    /// Renders one frame of `scene` to the surface.
    ///
    /// Frame lifecycle: drain any recorded GPU error (panicking after too
    /// many consecutive failing frames), acquire the swapchain texture
    /// (reconfiguring and skipping the frame on Lost/Outdated), upload the
    /// per-frame uniforms, then encode all primitive batches. If the
    /// instance buffer overflows mid-frame, the buffer is grown and the
    /// whole frame is re-encoded from scratch (the `loop`).
    pub fn draw(&mut self, scene: &Scene) {
        // Errors are reported asynchronously by the callback installed at
        // construction; treat one here as belonging to the previous frame.
        let last_error = self.last_error.lock().unwrap().take();
        if let Some(error) = last_error {
            self.failed_frame_count += 1;
            log::error!(
                "GPU error during frame (failure {} of 20): {error}",
                self.failed_frame_count
            );
            if self.failed_frame_count > 20 {
                panic!("Too many consecutive GPU errors. Last error: {error}");
            }
        } else {
            self.failed_frame_count = 0;
        }

        self.atlas.before_frame();

        let frame = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            // Lost/Outdated are recoverable: reconfigure and drop this frame;
            // the next draw will acquire a fresh swapchain image.
            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                self.surface.configure(&self.device, &self.surface_config);
                return;
            }
            Err(e) => {
                log::error!("Failed to acquire surface texture: {e}");
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // Path rasterization always works in straight alpha, regardless of
        // the surface alpha mode.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload all three uniform structs at their precomputed offsets.
        self.queue
            .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
        self.queue.write_buffer(
            &self.globals_buffer,
            self.path_globals_offset,
            bytemuck::bytes_of(&path_globals),
        );
        self.queue.write_buffer(
            &self.globals_buffer,
            self.gamma_offset,
            bytemuck::bytes_of(&gamma_params),
        );

        // Retry loop: each iteration encodes the full frame; on instance
        // buffer overflow the buffer is grown and encoding restarts.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder = self
                .device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("main_encoder"),
                });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                for batch in scene.batches() {
                    // Each draw helper returns false when the instance buffer
                    // could not hold the batch.
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths need their own render pass into the
                            // intermediate texture, so suspend the main pass,
                            // rasterize, then begin a continuation pass that
                            // loads (not clears) the frame.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Discard the partially-encoded frame, grow the buffer, and
                // re-encode — unless we've hit the 256 MiB cap, in which case
                // present whatever was cleared and bail out.
                drop(encoder);
                if self.instance_buffer_capacity >= 256 * 1024 * 1024 {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.queue.submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1132
1133 fn draw_quads(
1134 &self,
1135 quads: &[Quad],
1136 instance_offset: &mut u64,
1137 pass: &mut wgpu::RenderPass<'_>,
1138 ) -> bool {
1139 let data = unsafe { Self::instance_bytes(quads) };
1140 self.draw_instances(
1141 data,
1142 quads.len() as u32,
1143 &self.pipelines.quads,
1144 instance_offset,
1145 pass,
1146 )
1147 }
1148
1149 fn draw_shadows(
1150 &self,
1151 shadows: &[Shadow],
1152 instance_offset: &mut u64,
1153 pass: &mut wgpu::RenderPass<'_>,
1154 ) -> bool {
1155 let data = unsafe { Self::instance_bytes(shadows) };
1156 self.draw_instances(
1157 data,
1158 shadows.len() as u32,
1159 &self.pipelines.shadows,
1160 instance_offset,
1161 pass,
1162 )
1163 }
1164
1165 fn draw_underlines(
1166 &self,
1167 underlines: &[Underline],
1168 instance_offset: &mut u64,
1169 pass: &mut wgpu::RenderPass<'_>,
1170 ) -> bool {
1171 let data = unsafe { Self::instance_bytes(underlines) };
1172 self.draw_instances(
1173 data,
1174 underlines.len() as u32,
1175 &self.pipelines.underlines,
1176 instance_offset,
1177 pass,
1178 )
1179 }
1180
1181 fn draw_monochrome_sprites(
1182 &self,
1183 sprites: &[MonochromeSprite],
1184 texture_id: AtlasTextureId,
1185 instance_offset: &mut u64,
1186 pass: &mut wgpu::RenderPass<'_>,
1187 ) -> bool {
1188 let tex_info = self.atlas.get_texture_info(texture_id);
1189 let data = unsafe { Self::instance_bytes(sprites) };
1190 self.draw_instances_with_texture(
1191 data,
1192 sprites.len() as u32,
1193 &tex_info.view,
1194 &self.pipelines.mono_sprites,
1195 instance_offset,
1196 pass,
1197 )
1198 }
1199
1200 fn draw_subpixel_sprites(
1201 &self,
1202 sprites: &[SubpixelSprite],
1203 texture_id: AtlasTextureId,
1204 instance_offset: &mut u64,
1205 pass: &mut wgpu::RenderPass<'_>,
1206 ) -> bool {
1207 let tex_info = self.atlas.get_texture_info(texture_id);
1208 let data = unsafe { Self::instance_bytes(sprites) };
1209 let pipeline = self
1210 .pipelines
1211 .subpixel_sprites
1212 .as_ref()
1213 .unwrap_or(&self.pipelines.mono_sprites);
1214 self.draw_instances_with_texture(
1215 data,
1216 sprites.len() as u32,
1217 &tex_info.view,
1218 pipeline,
1219 instance_offset,
1220 pass,
1221 )
1222 }
1223
1224 fn draw_polychrome_sprites(
1225 &self,
1226 sprites: &[PolychromeSprite],
1227 texture_id: AtlasTextureId,
1228 instance_offset: &mut u64,
1229 pass: &mut wgpu::RenderPass<'_>,
1230 ) -> bool {
1231 let tex_info = self.atlas.get_texture_info(texture_id);
1232 let data = unsafe { Self::instance_bytes(sprites) };
1233 self.draw_instances_with_texture(
1234 data,
1235 sprites.len() as u32,
1236 &tex_info.view,
1237 &self.pipelines.poly_sprites,
1238 instance_offset,
1239 pass,
1240 )
1241 }
1242
1243 fn draw_instances(
1244 &self,
1245 data: &[u8],
1246 instance_count: u32,
1247 pipeline: &wgpu::RenderPipeline,
1248 instance_offset: &mut u64,
1249 pass: &mut wgpu::RenderPass<'_>,
1250 ) -> bool {
1251 if instance_count == 0 {
1252 return true;
1253 }
1254 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1255 return false;
1256 };
1257 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1258 label: None,
1259 layout: &self.bind_group_layouts.instances,
1260 entries: &[wgpu::BindGroupEntry {
1261 binding: 0,
1262 resource: self.instance_binding(offset, size),
1263 }],
1264 });
1265 pass.set_pipeline(pipeline);
1266 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1267 pass.set_bind_group(1, &bind_group, &[]);
1268 pass.draw(0..4, 0..instance_count);
1269 true
1270 }
1271
1272 fn draw_instances_with_texture(
1273 &self,
1274 data: &[u8],
1275 instance_count: u32,
1276 texture_view: &wgpu::TextureView,
1277 pipeline: &wgpu::RenderPipeline,
1278 instance_offset: &mut u64,
1279 pass: &mut wgpu::RenderPass<'_>,
1280 ) -> bool {
1281 if instance_count == 0 {
1282 return true;
1283 }
1284 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1285 return false;
1286 };
1287 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1288 label: None,
1289 layout: &self.bind_group_layouts.instances_with_texture,
1290 entries: &[
1291 wgpu::BindGroupEntry {
1292 binding: 0,
1293 resource: self.instance_binding(offset, size),
1294 },
1295 wgpu::BindGroupEntry {
1296 binding: 1,
1297 resource: wgpu::BindingResource::TextureView(texture_view),
1298 },
1299 wgpu::BindGroupEntry {
1300 binding: 2,
1301 resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1302 },
1303 ],
1304 });
1305 pass.set_pipeline(pipeline);
1306 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1307 pass.set_bind_group(1, &bind_group, &[]);
1308 pass.draw(0..4, 0..instance_count);
1309 true
1310 }
1311
    /// Reinterprets a slice of instance structs as a raw byte slice for
    /// upload to the GPU instance buffer.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `T` is safe to view as plain bytes:
    /// fully initialized with no padding bytes, since reading padding through
    /// a `u8` view is undefined behavior. NOTE(review): the gpui primitive
    /// types passed here (`Quad`, `Shadow`, sprites, …) are assumed to
    /// satisfy this — confirm they are `#[repr(C)]` without padding.
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        // SAFETY: pointer and length come directly from a valid slice; the
        // byte-level validity of the contents is the caller's obligation per
        // the contract above.
        unsafe {
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1320
1321 fn draw_paths_from_intermediate(
1322 &self,
1323 paths: &[Path<ScaledPixels>],
1324 instance_offset: &mut u64,
1325 pass: &mut wgpu::RenderPass<'_>,
1326 ) -> bool {
1327 let first_path = &paths[0];
1328 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1329 {
1330 paths
1331 .iter()
1332 .map(|p| PathSprite {
1333 bounds: p.clipped_bounds(),
1334 })
1335 .collect()
1336 } else {
1337 let mut bounds = first_path.clipped_bounds();
1338 for path in paths.iter().skip(1) {
1339 bounds = bounds.union(&path.clipped_bounds());
1340 }
1341 vec![PathSprite { bounds }]
1342 };
1343
1344 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1345 return true;
1346 };
1347
1348 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1349 self.draw_instances_with_texture(
1350 sprite_data,
1351 sprites.len() as u32,
1352 path_intermediate_view,
1353 &self.pipelines.paths,
1354 instance_offset,
1355 pass,
1356 )
1357 }
1358
1359 fn draw_paths_to_intermediate(
1360 &self,
1361 encoder: &mut wgpu::CommandEncoder,
1362 paths: &[Path<ScaledPixels>],
1363 instance_offset: &mut u64,
1364 ) -> bool {
1365 let mut vertices = Vec::new();
1366 for path in paths {
1367 let bounds = path.clipped_bounds();
1368 vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
1369 xy_position: v.xy_position,
1370 st_position: v.st_position,
1371 color: path.color,
1372 bounds,
1373 }));
1374 }
1375
1376 if vertices.is_empty() {
1377 return true;
1378 }
1379
1380 let vertex_data = unsafe { Self::instance_bytes(&vertices) };
1381 let Some((vertex_offset, vertex_size)) =
1382 self.write_to_instance_buffer(instance_offset, vertex_data)
1383 else {
1384 return false;
1385 };
1386
1387 let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1388 label: Some("path_rasterization_bind_group"),
1389 layout: &self.bind_group_layouts.instances,
1390 entries: &[wgpu::BindGroupEntry {
1391 binding: 0,
1392 resource: self.instance_binding(vertex_offset, vertex_size),
1393 }],
1394 });
1395
1396 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1397 return true;
1398 };
1399
1400 let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
1401 (msaa_view, Some(path_intermediate_view))
1402 } else {
1403 (path_intermediate_view, None)
1404 };
1405
1406 {
1407 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1408 label: Some("path_rasterization_pass"),
1409 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1410 view: target_view,
1411 resolve_target,
1412 ops: wgpu::Operations {
1413 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1414 store: wgpu::StoreOp::Store,
1415 },
1416 depth_slice: None,
1417 })],
1418 depth_stencil_attachment: None,
1419 ..Default::default()
1420 });
1421
1422 pass.set_pipeline(&self.pipelines.path_rasterization);
1423 pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
1424 pass.set_bind_group(1, &data_bind_group, &[]);
1425 pass.draw(0..vertices.len() as u32, 0..1);
1426 }
1427
1428 true
1429 }
1430
1431 fn grow_instance_buffer(&mut self) {
1432 let new_capacity = self.instance_buffer_capacity * 2;
1433 log::info!("increased instance buffer size to {}", new_capacity);
1434 self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1435 label: Some("instance_buffer"),
1436 size: new_capacity,
1437 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1438 mapped_at_creation: false,
1439 });
1440 self.instance_buffer_capacity = new_capacity;
1441 }
1442
1443 fn write_to_instance_buffer(
1444 &self,
1445 instance_offset: &mut u64,
1446 data: &[u8],
1447 ) -> Option<(u64, NonZeroU64)> {
1448 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1449 let size = (data.len() as u64).max(16);
1450 if offset + size > self.instance_buffer_capacity {
1451 return None;
1452 }
1453 self.queue.write_buffer(&self.instance_buffer, offset, data);
1454 *instance_offset = offset + size;
1455 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1456 }
1457
1458 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1459 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1460 buffer: &self.instance_buffer,
1461 offset,
1462 size: Some(size),
1463 })
1464 }
1465
1466 pub fn destroy(&mut self) {
1467 // wgpu resources are automatically cleaned up when dropped
1468 }
1469}
1470
/// Rendering parameters resolved once at construction from the adapter's
/// capabilities and `ZED_FONTS_*` environment variables (see
/// `RenderingParameters::new`).
struct RenderingParameters {
    // MSAA sample count for path rasterization (4, 2, or 1 — highest
    // supported by the surface format).
    path_sample_count: u32,
    // Per-channel gamma correction ratios for glyph rendering.
    gamma_ratios: [f32; 4],
    // Contrast boost applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    // Contrast boost applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
}
1477
1478impl RenderingParameters {
1479 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1480 use std::env;
1481
1482 let format_features = adapter.get_texture_format_features(surface_format);
1483 let path_sample_count = [4, 2, 1]
1484 .into_iter()
1485 .find(|&n| format_features.flags.sample_count_supported(n))
1486 .unwrap_or(1);
1487
1488 let gamma = env::var("ZED_FONTS_GAMMA")
1489 .ok()
1490 .and_then(|v| v.parse().ok())
1491 .unwrap_or(1.8_f32)
1492 .clamp(1.0, 2.2);
1493 let gamma_ratios = get_gamma_correction_ratios(gamma);
1494
1495 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1496 .ok()
1497 .and_then(|v| v.parse().ok())
1498 .unwrap_or(1.0_f32)
1499 .max(0.0);
1500
1501 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1502 .ok()
1503 .and_then(|v| v.parse().ok())
1504 .unwrap_or(0.5_f32)
1505 .max(0.0);
1506
1507 Self {
1508 path_sample_count,
1509 gamma_ratios,
1510 grayscale_enhanced_contrast,
1511 subpixel_enhanced_contrast,
1512 }
1513 }
1514}