1use super::{WgpuAtlas, WgpuContext};
2use crate::{
3 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
4 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
5 Underline, get_gamma_correction_ratios,
6};
7use bytemuck::{Pod, Zeroable};
8use log::warn;
9use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
10use std::num::NonZeroU64;
11use std::sync::Arc;
12
/// Per-frame uniform data shared by every shader entry point.
///
/// Uploaded at offset 0 of `globals_buffer` (and again, with
/// `premultiplied_alpha` forced to 0, at `path_globals_offset`).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    // Viewport size in pixels (width, height), matching the surface config.
    viewport_size: [f32; 2],
    // 1 when the surface composites with premultiplied alpha, 0 otherwise.
    premultiplied_alpha: u32,
    // Explicit padding so the struct is 16 bytes; keeps the layout
    // predictable for the uniform block on the WGSL side.
    pad: u32,
}
20
/// Plain-old-data mirror of `Bounds<ScaledPixels>`, suitable for direct
/// upload into a GPU buffer (see the `From` impl below).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    // Top-left corner (x, y) in scaled pixels.
    origin: [f32; 2],
    // Extent (width, height) in scaled pixels.
    size: [f32; 2],
}
27
28impl From<Bounds<ScaledPixels>> for PodBounds {
29 fn from(bounds: Bounds<ScaledPixels>) -> Self {
30 Self {
31 origin: [bounds.origin.x.0, bounds.origin.y.0],
32 size: [bounds.size.width.0, bounds.size.height.0],
33 }
34 }
35}
36
/// Uniform data for the `surfaces` pipeline: where the surface is drawn and
/// the rectangle it is clipped to.
// NOTE(review): the surfaces batch is a no-op in `draw` ("macOS-only for
// video playback"), so this is currently only consumed by the dead-code
// `surfaces` pipeline.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    // Destination rectangle of the surface on screen.
    bounds: PodBounds,
    // Clip rectangle applied in the shader.
    content_mask: PodBounds,
}
43
/// Text-rendering gamma/contrast parameters, uploaded once per frame at
/// `gamma_offset` and bound at binding 1 of the globals bind group.
///
/// Values come from `RenderingParameters` (see `draw`); presumably derived
/// via `get_gamma_correction_ratios` — confirm against that helper.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    gamma_ratios: [f32; 4],
    // Contrast boost applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    // Contrast boost applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    // Pads the struct to a 16-byte multiple for the uniform block layout.
    _pad: [f32; 2],
}
52
/// Per-instance data for compositing a rasterized path from the
/// intermediate texture onto the frame (one screen-space rectangle).
// NOTE(review): not referenced in this chunk; presumably used by
// `draw_paths_from_intermediate` — confirm.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
58
/// Vertex layout for the `path_rasterization` pass that fills paths into
/// the intermediate texture.
// NOTE(review): not referenced in this chunk; presumably built by
// `draw_paths_to_intermediate` — confirm.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    // Position in scaled pixels.
    xy_position: Point<ScaledPixels>,
    // Parametric (s, t) coordinate used by the fill shader.
    st_position: Point<f32>,
    color: Background,
    // Bounds of the whole path, for clipping/addressing in the shader.
    bounds: Bounds<ScaledPixels>,
}
67
/// Initial surface parameters supplied by the caller of [`WgpuRenderer::new`].
pub struct WgpuSurfaceConfig {
    // Initial drawable size in device pixels.
    pub size: Size<DevicePixels>,
    // Whether the window wants a transparent (premultiplied-alpha) surface.
    pub transparent: bool,
}
72
/// One render pipeline per primitive batch kind; rebuilt whenever the
/// surface alpha mode changes (see `update_transparency`).
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    // Fills paths into the (possibly multisampled) intermediate texture.
    path_rasterization: wgpu::RenderPipeline,
    // Composites the intermediate path texture onto the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    // Only present when the adapter supports dual-source blending;
    // otherwise subpixel sprites fall back to `mono_sprites`.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
85
/// Bind group layouts shared by all pipelines (group 0 = globals,
/// group 1 = one of the instance/surface layouts).
struct WgpuBindGroupLayouts {
    // Binding 0: `GlobalParams` uniform; binding 1: `GammaParams` uniform.
    globals: wgpu::BindGroupLayout,
    // Binding 0: read-only storage buffer of per-instance data.
    instances: wgpu::BindGroupLayout,
    // Instance storage buffer plus a 2D texture and filtering sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    // `SurfaceParams` uniform plus two textures and a sampler.
    surfaces: wgpu::BindGroupLayout,
}
92
/// wgpu-backed renderer for a single window surface.
///
/// Owns the swapchain surface, all pipelines, a packed uniform buffer
/// (globals / path globals / gamma params), a growable instance storage
/// buffer, and lazily-created offscreen textures for path rendering.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    // Single buffer holding three uniform blocks; the offsets below are
    // aligned to `min_uniform_buffer_offset_alignment`.
    globals_buffer: wgpu::Buffer,
    // Byte offset of the path-pass `GlobalParams` block.
    path_globals_offset: u64,
    // Byte offset of the `GammaParams` block.
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    // Per-frame instance data; grown (and the frame replayed) on overflow.
    instance_buffer: wgpu::Buffer,
    instance_buffer_capacity: u64,
    storage_buffer_alignment: u64,
    // Offscreen path textures; `None` until `ensure_intermediate_textures`
    // runs, and reset to `None` on resize.
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    // Alpha modes chosen at startup for the transparent/opaque cases,
    // switched between by `update_transparency`.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
}
120
121impl WgpuRenderer {
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// Reuses the shared [`WgpuContext`] in `gpu_context` when present,
    /// otherwise creates and stores one. Chooses a surface format and
    /// composite alpha modes from the surface capabilities, builds all
    /// pipelines, and allocates the uniform and instance buffers.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    pub fn new<W: HasWindowHandle + HasDisplayHandle>(
        gpu_context: &mut Option<WgpuContext>,
        window: &W,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
        let display_handle = window
            .display_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            raw_display_handle: display_handle.as_raw(),
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        let instance = gpu_context
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(WgpuContext::instance);

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        // Validate an existing context against this surface, or create and
        // cache a new context for subsequent windows.
        let context = match gpu_context {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => gpu_context.insert(WgpuContext::new(instance, &surface)?),
        };

        // Prefer plain (non-sRGB) 8-bit formats; fall back to any non-sRGB
        // format the surface supports, then to whatever comes first.
        let surface_caps = surface.get_capabilities(&context.adapter);
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first preferred alpha mode the surface supports, falling
        // back to the surface's first reported mode.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Both modes are resolved up front so `update_transparency` can
        // switch between them without re-querying capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: config.size.width.0 as u32,
            height: config.size.height.0 as u32,
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        surface.configure(&context.device, &surface_config);

        let device = Arc::clone(&context.device);
        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Pack three uniform blocks into one buffer: frame globals at 0,
        // path-pass globals, then gamma params — each offset aligned to the
        // device's minimum uniform-buffer offset alignment.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Instance data starts at 2 MiB and is grown by `draw` on overflow.
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Main-pass bind group: frame globals (binding 0) + gamma (binding 1).
        // Sizes are nonzero since both structs are non-empty, so unwrap is safe.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        // Path-pass bind group: identical layout, but binding 0 points at the
        // path globals block (premultiplied_alpha forced to 0 in `draw`).
        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        Ok(Self {
            device,
            queue,
            surface,
            surface_config,
            pipelines,
            bind_group_layouts,
            atlas,
            atlas_sampler,
            globals_buffer,
            path_globals_offset,
            gamma_offset,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            storage_buffer_alignment,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
        })
    }
353
    /// Builds the four bind group layouts shared by all pipelines.
    ///
    /// Binding numbers and types here must stay in sync with the bindings
    /// declared in `shaders.wgsl`.
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        // Group 0 for every pipeline: GlobalParams (vertex+fragment) and
        // GammaParams (fragment-only) uniforms.
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer holding per-instance primitive data; the
        // vertex shader indexes it by instance id.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        // Group 1 for textureless pipelines (quads, shadows, underlines, ...).
        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // Group 1 for sprite/path pipelines: instance data plus a sampled
        // 2D texture (atlas page or path intermediate) and filtering sampler.
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // Group 1 for the `surfaces` pipeline: SurfaceParams uniform plus two
        // textures and a sampler.
        // NOTE(review): the two textures are presumably separate video planes
        // (e.g. Y and CbCr) — confirm against fs_surface in shaders.wgsl.
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
477
    /// Compiles `shaders.wgsl` and builds one render pipeline per primitive
    /// kind.
    ///
    /// `alpha_mode` selects the blend state used when rendering directly to
    /// the surface; `path_sample_count` is the MSAA count for the path
    /// rasterization pass; `dual_source_blending` gates the dedicated
    /// subpixel-text pipeline.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });

        // Blend to match the compositor's expectation: premultiplied output
        // for a premultiplied surface, straight alpha otherwise.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Shared pipeline boilerplate. Note `buffers: &[]` — vertex data is
        // read from the instance storage buffer, not from vertex buffers.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Renders into the intermediate texture (not the surface), always
        // premultiplied and possibly multisampled.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
        );

        // Composites the premultiplied intermediate onto the frame:
        // src is already premultiplied, so color uses One/OneMinusSrcAlpha;
        // alpha accumulates additively.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Subpixel text needs per-channel coverage, which requires
        // dual-source blending (Src1 factors). COLOR-only write mask keeps
        // destination alpha untouched.
        let subpixel_sprites = if dual_source_blending {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
707
708 fn create_path_intermediate(
709 device: &wgpu::Device,
710 format: wgpu::TextureFormat,
711 width: u32,
712 height: u32,
713 ) -> (wgpu::Texture, wgpu::TextureView) {
714 let texture = device.create_texture(&wgpu::TextureDescriptor {
715 label: Some("path_intermediate"),
716 size: wgpu::Extent3d {
717 width: width.max(1),
718 height: height.max(1),
719 depth_or_array_layers: 1,
720 },
721 mip_level_count: 1,
722 sample_count: 1,
723 dimension: wgpu::TextureDimension::D2,
724 format,
725 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
726 view_formats: &[],
727 });
728 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
729 (texture, view)
730 }
731
732 fn create_msaa_if_needed(
733 device: &wgpu::Device,
734 format: wgpu::TextureFormat,
735 width: u32,
736 height: u32,
737 sample_count: u32,
738 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
739 if sample_count <= 1 {
740 return None;
741 }
742 let texture = device.create_texture(&wgpu::TextureDescriptor {
743 label: Some("path_msaa"),
744 size: wgpu::Extent3d {
745 width: width.max(1),
746 height: height.max(1),
747 depth_or_array_layers: 1,
748 },
749 mip_level_count: 1,
750 sample_count,
751 dimension: wgpu::TextureDimension::D2,
752 format,
753 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
754 view_formats: &[],
755 });
756 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
757 Some((texture, view))
758 }
759
    /// Reconfigures the swapchain for a new drawable size (in device
    /// pixels). No-op when the size is unchanged.
    ///
    /// Ordering matters here: wait for in-flight GPU work, destroy the old
    /// offscreen textures, reconfigure the surface, then leave the texture
    /// slots `None` so `draw` recreates them lazily.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = self.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = self.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = self.path_msaa_texture {
                texture.destroy();
            }

            // Surfaces reject zero-sized configurations, so clamp to 1x1.
            self.surface_config.width = width.max(1);
            self.surface_config.height = height.max(1);
            self.surface.configure(&self.device, &self.surface_config);

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            self.path_intermediate_texture = None;
            self.path_intermediate_view = None;
            self.path_msaa_texture = None;
            self.path_msaa_view = None;
        }
    }
794
795 fn ensure_intermediate_textures(&mut self) {
796 if self.path_intermediate_texture.is_some() {
797 return;
798 }
799
800 let (path_intermediate_texture, path_intermediate_view) = {
801 let (t, v) = Self::create_path_intermediate(
802 &self.device,
803 self.surface_config.format,
804 self.surface_config.width,
805 self.surface_config.height,
806 );
807 (Some(t), Some(v))
808 };
809 self.path_intermediate_texture = path_intermediate_texture;
810 self.path_intermediate_view = path_intermediate_view;
811
812 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
813 &self.device,
814 self.surface_config.format,
815 self.surface_config.width,
816 self.surface_config.height,
817 self.rendering_params.path_sample_count,
818 )
819 .map(|(t, v)| (Some(t), Some(v)))
820 .unwrap_or((None, None));
821 self.path_msaa_texture = path_msaa_texture;
822 self.path_msaa_view = path_msaa_view;
823 }
824
825 pub fn update_transparency(&mut self, transparent: bool) {
826 let new_alpha_mode = if transparent {
827 self.transparent_alpha_mode
828 } else {
829 self.opaque_alpha_mode
830 };
831
832 if new_alpha_mode != self.surface_config.alpha_mode {
833 self.surface_config.alpha_mode = new_alpha_mode;
834 self.surface.configure(&self.device, &self.surface_config);
835 self.pipelines = Self::create_pipelines(
836 &self.device,
837 &self.bind_group_layouts,
838 self.surface_config.format,
839 self.surface_config.alpha_mode,
840 self.rendering_params.path_sample_count,
841 self.dual_source_blending,
842 );
843 }
844 }
845
846 #[allow(dead_code)]
847 pub fn viewport_size(&self) -> Size<DevicePixels> {
848 Size {
849 width: DevicePixels(self.surface_config.width as i32),
850 height: DevicePixels(self.surface_config.height as i32),
851 }
852 }
853
    /// Shared handle to the sprite atlas used by the sprite pipelines.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
857
    /// Reports adapter identification for diagnostics; flags software
    /// rasterizers (e.g. llvmpipe) via the adapter's `DeviceType::Cpu`.
    pub fn gpu_specs(&self) -> GpuSpecs {
        GpuSpecs {
            is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
            device_name: self.adapter_info.name.clone(),
            driver_name: self.adapter_info.driver.clone(),
            driver_info: self.adapter_info.driver_info.clone(),
        }
    }
866
    /// Renders one frame of `scene` and presents it.
    ///
    /// Uploads the three uniform blocks, then encodes every primitive batch
    /// into a single render pass over the surface. Path batches temporarily
    /// end the main pass (they render into the intermediate texture first)
    /// and reopen it with `LoadOp::Load`. If the instance buffer overflows,
    /// the whole frame is replayed after growing the buffer, up to a
    /// 256 MiB cap.
    pub fn draw(&mut self, scene: &Scene) {
        self.atlas.before_frame();

        // Acquire the swapchain image; a Lost/Outdated surface is
        // reconfigured and the frame skipped (the next draw will succeed).
        let frame = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                self.surface.configure(&self.device, &self.surface_config);
                return;
            }
            Err(e) => {
                log::error!("Failed to acquire surface texture: {e}");
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // The path pass renders into the intermediate texture, which is
        // always treated as straight-alpha regardless of the surface mode.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        self.queue
            .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
        self.queue.write_buffer(
            &self.globals_buffer,
            self.path_globals_offset,
            bytemuck::bytes_of(&path_globals),
        );
        self.queue.write_buffer(
            &self.globals_buffer,
            self.gamma_offset,
            bytemuck::bytes_of(&gamma_params),
        );

        // Replay loop: each iteration encodes the whole frame from scratch;
        // it only repeats when the instance buffer overflowed and was grown.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder = self
                .device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("main_encoder"),
                });

            // Scope so `pass` is dropped before the encoder is finished.
            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw_* returns false when the instance buffer is full.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths render into the intermediate texture in
                            // their own pass, so the main pass must end here.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            // Reopen the main pass, preserving what has been
                            // drawn so far (Load instead of Clear).
                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Discard this frame's commands, grow the buffer, and replay.
                drop(encoder);
                if self.instance_buffer_capacity >= 256 * 1024 * 1024 {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    // Present whatever was cleared rather than hanging the
                    // swapchain on an unrecoverable frame.
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.queue.submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1062
1063 fn draw_quads(
1064 &self,
1065 quads: &[Quad],
1066 instance_offset: &mut u64,
1067 pass: &mut wgpu::RenderPass<'_>,
1068 ) -> bool {
1069 let data = unsafe { Self::instance_bytes(quads) };
1070 self.draw_instances(
1071 data,
1072 quads.len() as u32,
1073 &self.pipelines.quads,
1074 instance_offset,
1075 pass,
1076 )
1077 }
1078
1079 fn draw_shadows(
1080 &self,
1081 shadows: &[Shadow],
1082 instance_offset: &mut u64,
1083 pass: &mut wgpu::RenderPass<'_>,
1084 ) -> bool {
1085 let data = unsafe { Self::instance_bytes(shadows) };
1086 self.draw_instances(
1087 data,
1088 shadows.len() as u32,
1089 &self.pipelines.shadows,
1090 instance_offset,
1091 pass,
1092 )
1093 }
1094
1095 fn draw_underlines(
1096 &self,
1097 underlines: &[Underline],
1098 instance_offset: &mut u64,
1099 pass: &mut wgpu::RenderPass<'_>,
1100 ) -> bool {
1101 let data = unsafe { Self::instance_bytes(underlines) };
1102 self.draw_instances(
1103 data,
1104 underlines.len() as u32,
1105 &self.pipelines.underlines,
1106 instance_offset,
1107 pass,
1108 )
1109 }
1110
    /// Encodes one instanced draw for monochrome (alpha-mask) sprites
    /// sampled from the given atlas texture.
    /// Returns `false` if the instance buffer overflowed.
    fn draw_monochrome_sprites(
        &self,
        sprites: &[MonochromeSprite],
        texture_id: AtlasTextureId,
        instance_offset: &mut u64,
        pass: &mut wgpu::RenderPass<'_>,
    ) -> bool {
        let tex_info = self.atlas.get_texture_info(texture_id);
        let data = unsafe { Self::instance_bytes(sprites) };
        self.draw_instances_with_texture(
            data,
            sprites.len() as u32,
            &tex_info.view,
            &self.pipelines.mono_sprites,
            instance_offset,
            pass,
        )
    }
1129
1130 fn draw_subpixel_sprites(
1131 &self,
1132 sprites: &[SubpixelSprite],
1133 texture_id: AtlasTextureId,
1134 instance_offset: &mut u64,
1135 pass: &mut wgpu::RenderPass<'_>,
1136 ) -> bool {
1137 let tex_info = self.atlas.get_texture_info(texture_id);
1138 let data = unsafe { Self::instance_bytes(sprites) };
1139 let pipeline = self
1140 .pipelines
1141 .subpixel_sprites
1142 .as_ref()
1143 .unwrap_or(&self.pipelines.mono_sprites);
1144 self.draw_instances_with_texture(
1145 data,
1146 sprites.len() as u32,
1147 &tex_info.view,
1148 pipeline,
1149 instance_offset,
1150 pass,
1151 )
1152 }
1153
1154 fn draw_polychrome_sprites(
1155 &self,
1156 sprites: &[PolychromeSprite],
1157 texture_id: AtlasTextureId,
1158 instance_offset: &mut u64,
1159 pass: &mut wgpu::RenderPass<'_>,
1160 ) -> bool {
1161 let tex_info = self.atlas.get_texture_info(texture_id);
1162 let data = unsafe { Self::instance_bytes(sprites) };
1163 self.draw_instances_with_texture(
1164 data,
1165 sprites.len() as u32,
1166 &tex_info.view,
1167 &self.pipelines.poly_sprites,
1168 instance_offset,
1169 pass,
1170 )
1171 }
1172
1173 fn draw_instances(
1174 &self,
1175 data: &[u8],
1176 instance_count: u32,
1177 pipeline: &wgpu::RenderPipeline,
1178 instance_offset: &mut u64,
1179 pass: &mut wgpu::RenderPass<'_>,
1180 ) -> bool {
1181 if instance_count == 0 {
1182 return true;
1183 }
1184 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1185 return false;
1186 };
1187 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1188 label: None,
1189 layout: &self.bind_group_layouts.instances,
1190 entries: &[wgpu::BindGroupEntry {
1191 binding: 0,
1192 resource: self.instance_binding(offset, size),
1193 }],
1194 });
1195 pass.set_pipeline(pipeline);
1196 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1197 pass.set_bind_group(1, &bind_group, &[]);
1198 pass.draw(0..4, 0..instance_count);
1199 true
1200 }
1201
1202 fn draw_instances_with_texture(
1203 &self,
1204 data: &[u8],
1205 instance_count: u32,
1206 texture_view: &wgpu::TextureView,
1207 pipeline: &wgpu::RenderPipeline,
1208 instance_offset: &mut u64,
1209 pass: &mut wgpu::RenderPass<'_>,
1210 ) -> bool {
1211 if instance_count == 0 {
1212 return true;
1213 }
1214 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1215 return false;
1216 };
1217 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1218 label: None,
1219 layout: &self.bind_group_layouts.instances_with_texture,
1220 entries: &[
1221 wgpu::BindGroupEntry {
1222 binding: 0,
1223 resource: self.instance_binding(offset, size),
1224 },
1225 wgpu::BindGroupEntry {
1226 binding: 1,
1227 resource: wgpu::BindingResource::TextureView(texture_view),
1228 },
1229 wgpu::BindGroupEntry {
1230 binding: 2,
1231 resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1232 },
1233 ],
1234 });
1235 pass.set_pipeline(pipeline);
1236 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1237 pass.set_bind_group(1, &bind_group, &[]);
1238 pass.draw(0..4, 0..instance_count);
1239 true
1240 }
1241
1242 unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
1243 unsafe {
1244 std::slice::from_raw_parts(
1245 instances.as_ptr() as *const u8,
1246 std::mem::size_of_val(instances),
1247 )
1248 }
1249 }
1250
1251 fn draw_paths_from_intermediate(
1252 &self,
1253 paths: &[Path<ScaledPixels>],
1254 instance_offset: &mut u64,
1255 pass: &mut wgpu::RenderPass<'_>,
1256 ) -> bool {
1257 let first_path = &paths[0];
1258 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1259 {
1260 paths
1261 .iter()
1262 .map(|p| PathSprite {
1263 bounds: p.clipped_bounds(),
1264 })
1265 .collect()
1266 } else {
1267 let mut bounds = first_path.clipped_bounds();
1268 for path in paths.iter().skip(1) {
1269 bounds = bounds.union(&path.clipped_bounds());
1270 }
1271 vec![PathSprite { bounds }]
1272 };
1273
1274 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1275 return true;
1276 };
1277
1278 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1279 self.draw_instances_with_texture(
1280 sprite_data,
1281 sprites.len() as u32,
1282 path_intermediate_view,
1283 &self.pipelines.paths,
1284 instance_offset,
1285 pass,
1286 )
1287 }
1288
1289 fn draw_paths_to_intermediate(
1290 &self,
1291 encoder: &mut wgpu::CommandEncoder,
1292 paths: &[Path<ScaledPixels>],
1293 instance_offset: &mut u64,
1294 ) -> bool {
1295 let mut vertices = Vec::new();
1296 for path in paths {
1297 let bounds = path.clipped_bounds();
1298 vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
1299 xy_position: v.xy_position,
1300 st_position: v.st_position,
1301 color: path.color,
1302 bounds,
1303 }));
1304 }
1305
1306 if vertices.is_empty() {
1307 return true;
1308 }
1309
1310 let vertex_data = unsafe { Self::instance_bytes(&vertices) };
1311 let Some((vertex_offset, vertex_size)) =
1312 self.write_to_instance_buffer(instance_offset, vertex_data)
1313 else {
1314 return false;
1315 };
1316
1317 let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1318 label: Some("path_rasterization_bind_group"),
1319 layout: &self.bind_group_layouts.instances,
1320 entries: &[wgpu::BindGroupEntry {
1321 binding: 0,
1322 resource: self.instance_binding(vertex_offset, vertex_size),
1323 }],
1324 });
1325
1326 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1327 return true;
1328 };
1329
1330 let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
1331 (msaa_view, Some(path_intermediate_view))
1332 } else {
1333 (path_intermediate_view, None)
1334 };
1335
1336 {
1337 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1338 label: Some("path_rasterization_pass"),
1339 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1340 view: target_view,
1341 resolve_target,
1342 ops: wgpu::Operations {
1343 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1344 store: wgpu::StoreOp::Store,
1345 },
1346 depth_slice: None,
1347 })],
1348 depth_stencil_attachment: None,
1349 ..Default::default()
1350 });
1351
1352 pass.set_pipeline(&self.pipelines.path_rasterization);
1353 pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
1354 pass.set_bind_group(1, &data_bind_group, &[]);
1355 pass.draw(0..vertices.len() as u32, 0..1);
1356 }
1357
1358 true
1359 }
1360
1361 fn grow_instance_buffer(&mut self) {
1362 let new_capacity = self.instance_buffer_capacity * 2;
1363 log::info!("increased instance buffer size to {}", new_capacity);
1364 self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1365 label: Some("instance_buffer"),
1366 size: new_capacity,
1367 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1368 mapped_at_creation: false,
1369 });
1370 self.instance_buffer_capacity = new_capacity;
1371 }
1372
1373 fn write_to_instance_buffer(
1374 &self,
1375 instance_offset: &mut u64,
1376 data: &[u8],
1377 ) -> Option<(u64, NonZeroU64)> {
1378 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1379 let size = (data.len() as u64).max(16);
1380 if offset + size > self.instance_buffer_capacity {
1381 return None;
1382 }
1383 self.queue.write_buffer(&self.instance_buffer, offset, data);
1384 *instance_offset = offset + size;
1385 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1386 }
1387
1388 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1389 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1390 buffer: &self.instance_buffer,
1391 offset,
1392 size: Some(size),
1393 })
1394 }
1395
    /// Releases renderer resources.
    ///
    /// Intentionally a no-op: every GPU resource this renderer owns is a
    /// wgpu handle that is freed automatically on drop.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1399}
1400
/// Render-quality parameters resolved once from adapter capabilities and
/// environment-variable overrides (see `RenderingParameters::new`).
struct RenderingParameters {
    // MSAA sample count used for path rasterization (4, 2, or 1).
    path_sample_count: u32,
    // Precomputed gamma-correction ratios derived from the configured gamma.
    gamma_ratios: [f32; 4],
    // Extra contrast applied to grayscale-antialiased text.
    grayscale_enhanced_contrast: f32,
    // Extra contrast applied to subpixel-antialiased text.
    subpixel_enhanced_contrast: f32,
}
1407
1408impl RenderingParameters {
1409 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1410 use std::env;
1411
1412 let format_features = adapter.get_texture_format_features(surface_format);
1413 let path_sample_count = [4, 2, 1]
1414 .into_iter()
1415 .find(|&n| format_features.flags.sample_count_supported(n))
1416 .unwrap_or(1);
1417
1418 let gamma = env::var("ZED_FONTS_GAMMA")
1419 .ok()
1420 .and_then(|v| v.parse().ok())
1421 .unwrap_or(1.8_f32)
1422 .clamp(1.0, 2.2);
1423 let gamma_ratios = get_gamma_correction_ratios(gamma);
1424
1425 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1426 .ok()
1427 .and_then(|v| v.parse().ok())
1428 .unwrap_or(1.0_f32)
1429 .max(0.0);
1430
1431 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1432 .ok()
1433 .and_then(|v| v.parse().ok())
1434 .unwrap_or(0.5_f32)
1435 .max(0.0);
1436
1437 Self {
1438 path_sample_count,
1439 gamma_ratios,
1440 grayscale_enhanced_contrast,
1441 subpixel_enhanced_contrast,
1442 }
1443 }
1444}