1use crate::{WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use log::warn;
9use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
10use std::num::NonZeroU64;
11use std::sync::Arc;
12
/// Per-frame uniform data shared by the shaders.
///
/// Layout must stay in sync with the corresponding uniform declared in
/// `shaders.wgsl` (`#[repr(C)]` + `Pod` make it safe to upload byte-wise).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    /// Viewport size in pixels: `[width, height]`.
    viewport_size: [f32; 2],
    /// 1 when the surface uses premultiplied alpha compositing, 0 otherwise.
    premultiplied_alpha: u32,
    /// Explicit padding so the struct size is a multiple of 8/16 bytes as
    /// required by WGSL uniform layout rules.
    pad: u32,
}
20
/// A rectangle flattened into plain `f32`s so it can be uploaded to the GPU.
///
/// Mirrors `Bounds<ScaledPixels>` but with a `Pod`-compatible layout.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    /// Top-left corner: `[x, y]`.
    origin: [f32; 2],
    /// Extent: `[width, height]`.
    size: [f32; 2],
}
27
28impl From<Bounds<ScaledPixels>> for PodBounds {
29 fn from(bounds: Bounds<ScaledPixels>) -> Self {
30 Self {
31 origin: [bounds.origin.x.0, bounds.origin.y.0],
32 size: [bounds.size.width.0, bounds.size.height.0],
33 }
34 }
35}
36
/// Uniform data for drawing an external surface (video frame) quad.
///
/// Layout must match the surface uniform in `shaders.wgsl`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    /// Where on screen the surface is drawn.
    bounds: PodBounds,
    /// Clip rectangle applied in the fragment shader.
    content_mask: PodBounds,
}
43
/// Text-rendering gamma/contrast parameters, uploaded once per frame.
///
/// Values come from `get_gamma_correction_ratios` and the platform rendering
/// parameters; consumed by the glyph fragment shaders.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    /// Polynomial coefficients used for gamma-correct glyph blending.
    gamma_ratios: [f32; 4],
    /// Contrast enhancement applied to grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    /// Contrast enhancement applied to subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    /// Padding to a 16-byte multiple for WGSL uniform layout.
    _pad: [f32; 2],
}
52
/// Per-instance data for compositing a rasterized path from the intermediate
/// texture onto the frame.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    /// Screen-space rectangle the path covers.
    bounds: Bounds<ScaledPixels>,
}
58
/// Vertex format used when rasterizing paths into the intermediate texture.
///
/// NOTE(review): `#[repr(C)]` suggests this is uploaded to the GPU byte-wise;
/// the field types are gpui wrappers, so the WGSL side presumably mirrors this
/// layout — confirm against `shaders.wgsl`.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    /// Vertex position in scaled screen pixels.
    xy_position: Point<ScaledPixels>,
    /// Position in the path's local coordinate space (for curve evaluation).
    st_position: Point<f32>,
    /// Fill color/background of the path.
    color: Background,
    /// Bounding rectangle of the whole path.
    bounds: Bounds<ScaledPixels>,
}
67
/// Initial configuration for the renderer's window surface.
pub struct WgpuSurfaceConfig {
    /// Drawable size in device pixels.
    pub size: Size<DevicePixels>,
    /// Whether the window background should be composited transparently.
    pub transparent: bool,
}
72
/// One render pipeline per primitive kind drawn by the renderer.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    /// Rasterizes path geometry into the intermediate texture.
    path_rasterization: wgpu::RenderPipeline,
    /// Composites rasterized paths from the intermediate texture to the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    /// Present only when the adapter supports dual-source blending; callers
    /// fall back to `mono_sprites` otherwise.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
85
/// Shared bind-group layouts used to build the pipelines and per-draw bind groups.
struct WgpuBindGroupLayouts {
    /// Group 0: global + gamma uniform buffers.
    globals: wgpu::BindGroupLayout,
    /// Group 1 variant: instance storage buffer only.
    instances: wgpu::BindGroupLayout,
    /// Group 1 variant: instance storage buffer + atlas texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    /// Group 1 variant for external surfaces (uniforms + two textures + sampler).
    surfaces: wgpu::BindGroupLayout,
}
92
/// A gpui scene renderer backed by wgpu.
///
/// Owns the window surface, all pipelines, the sprite atlas, and the shared
/// uniform/instance buffers used to stream primitive data each frame.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    /// Single buffer holding three uniform sections: frame globals at offset 0,
    /// path-pass globals at `path_globals_offset`, gamma params at `gamma_offset`.
    globals_buffer: wgpu::Buffer,
    path_globals_offset: u64,
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    /// Streaming storage buffer for per-primitive instance data; grown on overflow.
    instance_buffer: wgpu::Buffer,
    instance_buffer_capacity: u64,
    storage_buffer_alignment: u64,
    /// Offscreen target paths are rasterized into before compositing.
    path_intermediate_texture: wgpu::Texture,
    path_intermediate_view: wgpu::TextureView,
    /// MSAA color target for path rasterization; `None` when sample count is 1.
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    /// Alpha mode used when the window is transparent.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    /// Alpha mode used when the window is opaque.
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
}
120
121impl WgpuRenderer {
122 /// Creates a new WgpuRenderer from raw window handles.
123 ///
124 /// # Safety
125 /// The caller must ensure that the window handle remains valid for the lifetime
126 /// of the returned renderer.
127 pub fn new<W: HasWindowHandle + HasDisplayHandle>(
128 gpu_context: &mut Option<WgpuContext>,
129 window: &W,
130 config: WgpuSurfaceConfig,
131 ) -> anyhow::Result<Self> {
132 let window_handle = window
133 .window_handle()
134 .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
135 let display_handle = window
136 .display_handle()
137 .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;
138
139 let target = wgpu::SurfaceTargetUnsafe::RawHandle {
140 raw_display_handle: display_handle.as_raw(),
141 raw_window_handle: window_handle.as_raw(),
142 };
143
144 // Use the existing context's instance if available, otherwise create a new one.
145 // The surface must be created with the same instance that will be used for
146 // adapter selection, otherwise wgpu will panic.
147 let instance = gpu_context
148 .as_ref()
149 .map(|ctx| ctx.instance.clone())
150 .unwrap_or_else(WgpuContext::instance);
151
152 // Safety: The caller guarantees that the window handle is valid for the
153 // lifetime of this renderer. In practice, the RawWindow struct is created
154 // from the native window handles and the surface is dropped before the window.
155 let surface = unsafe {
156 instance
157 .create_surface_unsafe(target)
158 .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
159 };
160
161 let context = match gpu_context {
162 Some(context) => {
163 context.check_compatible_with_surface(&surface)?;
164 context
165 }
166 None => gpu_context.insert(WgpuContext::new(instance, &surface)?),
167 };
168
169 let surface_caps = surface.get_capabilities(&context.adapter);
170 let preferred_formats = [
171 wgpu::TextureFormat::Bgra8Unorm,
172 wgpu::TextureFormat::Rgba8Unorm,
173 ];
174 let surface_format = preferred_formats
175 .iter()
176 .find(|f| surface_caps.formats.contains(f))
177 .copied()
178 .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
179 .or_else(|| surface_caps.formats.first().copied())
180 .ok_or_else(|| {
181 anyhow::anyhow!(
182 "Surface reports no supported texture formats for adapter {:?}",
183 context.adapter.get_info().name
184 )
185 })?;
186
187 let pick_alpha_mode =
188 |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
189 preferences
190 .iter()
191 .find(|p| surface_caps.alpha_modes.contains(p))
192 .copied()
193 .or_else(|| surface_caps.alpha_modes.first().copied())
194 .ok_or_else(|| {
195 anyhow::anyhow!(
196 "Surface reports no supported alpha modes for adapter {:?}",
197 context.adapter.get_info().name
198 )
199 })
200 };
201
202 let transparent_alpha_mode = pick_alpha_mode(&[
203 wgpu::CompositeAlphaMode::PreMultiplied,
204 wgpu::CompositeAlphaMode::Inherit,
205 ])?;
206
207 let opaque_alpha_mode = pick_alpha_mode(&[
208 wgpu::CompositeAlphaMode::Opaque,
209 wgpu::CompositeAlphaMode::Inherit,
210 ])?;
211
212 let alpha_mode = if config.transparent {
213 transparent_alpha_mode
214 } else {
215 opaque_alpha_mode
216 };
217
218 let surface_config = wgpu::SurfaceConfiguration {
219 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
220 format: surface_format,
221 width: config.size.width.0 as u32,
222 height: config.size.height.0 as u32,
223 present_mode: wgpu::PresentMode::Fifo,
224 desired_maximum_frame_latency: 2,
225 alpha_mode,
226 view_formats: vec![],
227 };
228 surface.configure(&context.device, &surface_config);
229
230 let device = Arc::clone(&context.device);
231 let queue = Arc::clone(&context.queue);
232 let dual_source_blending = context.supports_dual_source_blending();
233
234 let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
235 let bind_group_layouts = Self::create_bind_group_layouts(&device);
236 let pipelines = Self::create_pipelines(
237 &device,
238 &bind_group_layouts,
239 surface_format,
240 alpha_mode,
241 rendering_params.path_sample_count,
242 dual_source_blending,
243 );
244
245 let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
246 let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
247 label: Some("atlas_sampler"),
248 mag_filter: wgpu::FilterMode::Linear,
249 min_filter: wgpu::FilterMode::Linear,
250 ..Default::default()
251 });
252
253 let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
254 let globals_size = std::mem::size_of::<GlobalParams>() as u64;
255 let gamma_size = std::mem::size_of::<GammaParams>() as u64;
256 let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
257 let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);
258
259 let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
260 label: Some("globals_buffer"),
261 size: gamma_offset + gamma_size,
262 usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
263 mapped_at_creation: false,
264 });
265
266 let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
267 let initial_instance_buffer_capacity = 2 * 1024 * 1024;
268 let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
269 label: Some("instance_buffer"),
270 size: initial_instance_buffer_capacity,
271 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
272 mapped_at_creation: false,
273 });
274
275 let (path_intermediate_texture, path_intermediate_view) = Self::create_path_intermediate(
276 &device,
277 surface_format,
278 config.size.width.0 as u32,
279 config.size.height.0 as u32,
280 );
281
282 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
283 &device,
284 surface_format,
285 config.size.width.0 as u32,
286 config.size.height.0 as u32,
287 rendering_params.path_sample_count,
288 )
289 .map(|(t, v)| (Some(t), Some(v)))
290 .unwrap_or((None, None));
291
292 let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
293 label: Some("globals_bind_group"),
294 layout: &bind_group_layouts.globals,
295 entries: &[
296 wgpu::BindGroupEntry {
297 binding: 0,
298 resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
299 buffer: &globals_buffer,
300 offset: 0,
301 size: Some(NonZeroU64::new(globals_size).unwrap()),
302 }),
303 },
304 wgpu::BindGroupEntry {
305 binding: 1,
306 resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
307 buffer: &globals_buffer,
308 offset: gamma_offset,
309 size: Some(NonZeroU64::new(gamma_size).unwrap()),
310 }),
311 },
312 ],
313 });
314
315 let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
316 label: Some("path_globals_bind_group"),
317 layout: &bind_group_layouts.globals,
318 entries: &[
319 wgpu::BindGroupEntry {
320 binding: 0,
321 resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
322 buffer: &globals_buffer,
323 offset: path_globals_offset,
324 size: Some(NonZeroU64::new(globals_size).unwrap()),
325 }),
326 },
327 wgpu::BindGroupEntry {
328 binding: 1,
329 resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
330 buffer: &globals_buffer,
331 offset: gamma_offset,
332 size: Some(NonZeroU64::new(gamma_size).unwrap()),
333 }),
334 },
335 ],
336 });
337
338 let adapter_info = context.adapter.get_info();
339
340 Ok(Self {
341 device,
342 queue,
343 surface,
344 surface_config,
345 pipelines,
346 bind_group_layouts,
347 atlas,
348 atlas_sampler,
349 globals_buffer,
350 path_globals_offset,
351 gamma_offset,
352 globals_bind_group,
353 path_globals_bind_group,
354 instance_buffer,
355 instance_buffer_capacity: initial_instance_buffer_capacity,
356 storage_buffer_alignment,
357 path_intermediate_texture,
358 path_intermediate_view,
359 path_msaa_texture,
360 path_msaa_view,
361 rendering_params,
362 dual_source_blending,
363 adapter_info,
364 transparent_alpha_mode,
365 opaque_alpha_mode,
366 })
367 }
368
369 fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
370 let globals =
371 device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
372 label: Some("globals_layout"),
373 entries: &[
374 wgpu::BindGroupLayoutEntry {
375 binding: 0,
376 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
377 ty: wgpu::BindingType::Buffer {
378 ty: wgpu::BufferBindingType::Uniform,
379 has_dynamic_offset: false,
380 min_binding_size: NonZeroU64::new(
381 std::mem::size_of::<GlobalParams>() as u64
382 ),
383 },
384 count: None,
385 },
386 wgpu::BindGroupLayoutEntry {
387 binding: 1,
388 visibility: wgpu::ShaderStages::FRAGMENT,
389 ty: wgpu::BindingType::Buffer {
390 ty: wgpu::BufferBindingType::Uniform,
391 has_dynamic_offset: false,
392 min_binding_size: NonZeroU64::new(
393 std::mem::size_of::<GammaParams>() as u64
394 ),
395 },
396 count: None,
397 },
398 ],
399 });
400
401 let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
402 binding,
403 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
404 ty: wgpu::BindingType::Buffer {
405 ty: wgpu::BufferBindingType::Storage { read_only: true },
406 has_dynamic_offset: false,
407 min_binding_size: None,
408 },
409 count: None,
410 };
411
412 let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
413 label: Some("instances_layout"),
414 entries: &[storage_buffer_entry(0)],
415 });
416
417 let instances_with_texture =
418 device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
419 label: Some("instances_with_texture_layout"),
420 entries: &[
421 storage_buffer_entry(0),
422 wgpu::BindGroupLayoutEntry {
423 binding: 1,
424 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
425 ty: wgpu::BindingType::Texture {
426 sample_type: wgpu::TextureSampleType::Float { filterable: true },
427 view_dimension: wgpu::TextureViewDimension::D2,
428 multisampled: false,
429 },
430 count: None,
431 },
432 wgpu::BindGroupLayoutEntry {
433 binding: 2,
434 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
435 ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
436 count: None,
437 },
438 ],
439 });
440
441 let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
442 label: Some("surfaces_layout"),
443 entries: &[
444 wgpu::BindGroupLayoutEntry {
445 binding: 0,
446 visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
447 ty: wgpu::BindingType::Buffer {
448 ty: wgpu::BufferBindingType::Uniform,
449 has_dynamic_offset: false,
450 min_binding_size: NonZeroU64::new(
451 std::mem::size_of::<SurfaceParams>() as u64
452 ),
453 },
454 count: None,
455 },
456 wgpu::BindGroupLayoutEntry {
457 binding: 1,
458 visibility: wgpu::ShaderStages::FRAGMENT,
459 ty: wgpu::BindingType::Texture {
460 sample_type: wgpu::TextureSampleType::Float { filterable: true },
461 view_dimension: wgpu::TextureViewDimension::D2,
462 multisampled: false,
463 },
464 count: None,
465 },
466 wgpu::BindGroupLayoutEntry {
467 binding: 2,
468 visibility: wgpu::ShaderStages::FRAGMENT,
469 ty: wgpu::BindingType::Texture {
470 sample_type: wgpu::TextureSampleType::Float { filterable: true },
471 view_dimension: wgpu::TextureViewDimension::D2,
472 multisampled: false,
473 },
474 count: None,
475 },
476 wgpu::BindGroupLayoutEntry {
477 binding: 3,
478 visibility: wgpu::ShaderStages::FRAGMENT,
479 ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
480 count: None,
481 },
482 ],
483 });
484
485 WgpuBindGroupLayouts {
486 globals,
487 instances,
488 instances_with_texture,
489 surfaces,
490 }
491 }
492
    /// Builds every render pipeline from the embedded WGSL shader module.
    ///
    /// `alpha_mode` selects the main-pass blend state (premultiplied vs.
    /// straight alpha); `path_sample_count` sets MSAA for path rasterization;
    /// `dual_source_blending` gates the optional subpixel-text pipeline.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });

        // Match the blend math to how the compositor will interpret our alpha.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // All pipelines share the same skeleton: no vertex buffers (instance
        // data is pulled from a storage buffer), no culling, no depth/stencil.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Paths are rasterized into the intermediate texture with premultiplied
        // alpha (and possibly MSAA), independent of the surface alpha mode.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
        );

        // Composites the premultiplied intermediate texture onto the frame:
        // color uses (One, OneMinusSrcAlpha); alpha accumulates additively.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Subpixel text needs per-channel coverage via dual-source blending
        // (Src1 factors); only built when the adapter supports it. Alpha writes
        // are masked off so subpixel glyphs don't disturb the destination alpha.
        let subpixel_sprites = if dual_source_blending {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
722
723 fn create_path_intermediate(
724 device: &wgpu::Device,
725 format: wgpu::TextureFormat,
726 width: u32,
727 height: u32,
728 ) -> (wgpu::Texture, wgpu::TextureView) {
729 let texture = device.create_texture(&wgpu::TextureDescriptor {
730 label: Some("path_intermediate"),
731 size: wgpu::Extent3d {
732 width: width.max(1),
733 height: height.max(1),
734 depth_or_array_layers: 1,
735 },
736 mip_level_count: 1,
737 sample_count: 1,
738 dimension: wgpu::TextureDimension::D2,
739 format,
740 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
741 view_formats: &[],
742 });
743 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
744 (texture, view)
745 }
746
747 fn create_msaa_if_needed(
748 device: &wgpu::Device,
749 format: wgpu::TextureFormat,
750 width: u32,
751 height: u32,
752 sample_count: u32,
753 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
754 if sample_count <= 1 {
755 return None;
756 }
757 let texture = device.create_texture(&wgpu::TextureDescriptor {
758 label: Some("path_msaa"),
759 size: wgpu::Extent3d {
760 width: width.max(1),
761 height: height.max(1),
762 depth_or_array_layers: 1,
763 },
764 mip_level_count: 1,
765 sample_count,
766 dimension: wgpu::TextureDimension::D2,
767 format,
768 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
769 view_formats: &[],
770 });
771 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
772 Some((texture, view))
773 }
774
775 pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
776 let width = size.width.0 as u32;
777 let height = size.height.0 as u32;
778
779 if width != self.surface_config.width || height != self.surface_config.height {
780 // Wait for any in-flight GPU work to complete before destroying textures
781 if let Err(e) = self.device.poll(wgpu::PollType::Wait {
782 submission_index: None,
783 timeout: None,
784 }) {
785 warn!("Failed to poll device during resize: {e:?}");
786 }
787
788 // Destroy old textures before allocating new ones to avoid GPU memory spikes
789 self.path_intermediate_texture.destroy();
790 if let Some(ref texture) = self.path_msaa_texture {
791 texture.destroy();
792 }
793
794 self.surface_config.width = width.max(1);
795 self.surface_config.height = height.max(1);
796 self.surface.configure(&self.device, &self.surface_config);
797
798 let (path_intermediate_texture, path_intermediate_view) =
799 Self::create_path_intermediate(
800 &self.device,
801 self.surface_config.format,
802 self.surface_config.width,
803 self.surface_config.height,
804 );
805 self.path_intermediate_texture = path_intermediate_texture;
806 self.path_intermediate_view = path_intermediate_view;
807
808 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
809 &self.device,
810 self.surface_config.format,
811 self.surface_config.width,
812 self.surface_config.height,
813 self.rendering_params.path_sample_count,
814 )
815 .map(|(t, v)| (Some(t), Some(v)))
816 .unwrap_or((None, None));
817 self.path_msaa_texture = path_msaa_texture;
818 self.path_msaa_view = path_msaa_view;
819 }
820 }
821
822 pub fn update_transparency(&mut self, transparent: bool) {
823 let new_alpha_mode = if transparent {
824 self.transparent_alpha_mode
825 } else {
826 self.opaque_alpha_mode
827 };
828
829 if new_alpha_mode != self.surface_config.alpha_mode {
830 self.surface_config.alpha_mode = new_alpha_mode;
831 self.surface.configure(&self.device, &self.surface_config);
832 self.pipelines = Self::create_pipelines(
833 &self.device,
834 &self.bind_group_layouts,
835 self.surface_config.format,
836 self.surface_config.alpha_mode,
837 self.rendering_params.path_sample_count,
838 self.dual_source_blending,
839 );
840 }
841 }
842
843 #[allow(dead_code)]
844 pub fn viewport_size(&self) -> Size<DevicePixels> {
845 Size {
846 width: DevicePixels(self.surface_config.width as i32),
847 height: DevicePixels(self.surface_config.height as i32),
848 }
849 }
850
    /// Returns the shared sprite atlas used to cache GPU texture data.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
854
855 pub fn gpu_specs(&self) -> GpuSpecs {
856 GpuSpecs {
857 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
858 device_name: self.adapter_info.name.clone(),
859 driver_name: self.adapter_info.driver.clone(),
860 driver_info: self.adapter_info.driver_info.clone(),
861 }
862 }
863
    /// Renders a gpui `Scene` to the window surface.
    ///
    /// Streams all primitive instance data into `instance_buffer` and records
    /// one command buffer. If the instance buffer overflows mid-frame, the
    /// whole frame is re-encoded after growing the buffer (up to a 256 MiB cap).
    /// Path batches temporarily suspend the main render pass to rasterize into
    /// the intermediate texture, then resume and composite.
    pub fn draw(&mut self, scene: &Scene) {
        self.atlas.before_frame();

        let frame = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            // A lost/outdated swapchain just needs reconfiguring; skip this frame.
            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                self.surface.configure(&self.device, &self.surface_config);
                return;
            }
            Err(e) => {
                log::error!("Failed to acquire surface texture: {e}");
                return;
            }
        };
        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // The path rasterization pass always works in straight alpha,
        // regardless of the surface's compositing mode.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload all three uniform sections at their precomputed offsets.
        self.queue
            .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
        self.queue.write_buffer(
            &self.globals_buffer,
            self.path_globals_offset,
            bytemuck::bytes_of(&path_globals),
        );
        self.queue.write_buffer(
            &self.globals_buffer,
            self.gamma_offset,
            bytemuck::bytes_of(&gamma_params),
        );

        // Encode the frame; on instance-buffer overflow, grow and re-encode.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder = self
                .device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("main_encoder"),
                });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                for batch in scene.batches() {
                    // Each draw helper returns false when the instance buffer
                    // is full, which aborts this encoding attempt.
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths render into an intermediate texture, which
                            // requires ending the main pass, encoding the
                            // rasterization pass, then restarting the main pass
                            // with LoadOp::Load to preserve prior contents.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Discard this encoding attempt entirely; nothing was submitted.
                drop(encoder);
                if self.instance_buffer_capacity >= 256 * 1024 * 1024 {
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    // Present anyway (the frame is cleared) rather than hang.
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.queue.submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1055
1056 fn draw_quads(
1057 &self,
1058 quads: &[Quad],
1059 instance_offset: &mut u64,
1060 pass: &mut wgpu::RenderPass<'_>,
1061 ) -> bool {
1062 let data = unsafe { Self::instance_bytes(quads) };
1063 self.draw_instances(
1064 data,
1065 quads.len() as u32,
1066 &self.pipelines.quads,
1067 instance_offset,
1068 pass,
1069 )
1070 }
1071
1072 fn draw_shadows(
1073 &self,
1074 shadows: &[Shadow],
1075 instance_offset: &mut u64,
1076 pass: &mut wgpu::RenderPass<'_>,
1077 ) -> bool {
1078 let data = unsafe { Self::instance_bytes(shadows) };
1079 self.draw_instances(
1080 data,
1081 shadows.len() as u32,
1082 &self.pipelines.shadows,
1083 instance_offset,
1084 pass,
1085 )
1086 }
1087
1088 fn draw_underlines(
1089 &self,
1090 underlines: &[Underline],
1091 instance_offset: &mut u64,
1092 pass: &mut wgpu::RenderPass<'_>,
1093 ) -> bool {
1094 let data = unsafe { Self::instance_bytes(underlines) };
1095 self.draw_instances(
1096 data,
1097 underlines.len() as u32,
1098 &self.pipelines.underlines,
1099 instance_offset,
1100 pass,
1101 )
1102 }
1103
1104 fn draw_monochrome_sprites(
1105 &self,
1106 sprites: &[MonochromeSprite],
1107 texture_id: AtlasTextureId,
1108 instance_offset: &mut u64,
1109 pass: &mut wgpu::RenderPass<'_>,
1110 ) -> bool {
1111 let tex_info = self.atlas.get_texture_info(texture_id);
1112 let data = unsafe { Self::instance_bytes(sprites) };
1113 self.draw_instances_with_texture(
1114 data,
1115 sprites.len() as u32,
1116 &tex_info.view,
1117 &self.pipelines.mono_sprites,
1118 instance_offset,
1119 pass,
1120 )
1121 }
1122
1123 fn draw_subpixel_sprites(
1124 &self,
1125 sprites: &[SubpixelSprite],
1126 texture_id: AtlasTextureId,
1127 instance_offset: &mut u64,
1128 pass: &mut wgpu::RenderPass<'_>,
1129 ) -> bool {
1130 let tex_info = self.atlas.get_texture_info(texture_id);
1131 let data = unsafe { Self::instance_bytes(sprites) };
1132 let pipeline = self
1133 .pipelines
1134 .subpixel_sprites
1135 .as_ref()
1136 .unwrap_or(&self.pipelines.mono_sprites);
1137 self.draw_instances_with_texture(
1138 data,
1139 sprites.len() as u32,
1140 &tex_info.view,
1141 pipeline,
1142 instance_offset,
1143 pass,
1144 )
1145 }
1146
1147 fn draw_polychrome_sprites(
1148 &self,
1149 sprites: &[PolychromeSprite],
1150 texture_id: AtlasTextureId,
1151 instance_offset: &mut u64,
1152 pass: &mut wgpu::RenderPass<'_>,
1153 ) -> bool {
1154 let tex_info = self.atlas.get_texture_info(texture_id);
1155 let data = unsafe { Self::instance_bytes(sprites) };
1156 self.draw_instances_with_texture(
1157 data,
1158 sprites.len() as u32,
1159 &tex_info.view,
1160 &self.pipelines.poly_sprites,
1161 instance_offset,
1162 pass,
1163 )
1164 }
1165
1166 fn draw_instances(
1167 &self,
1168 data: &[u8],
1169 instance_count: u32,
1170 pipeline: &wgpu::RenderPipeline,
1171 instance_offset: &mut u64,
1172 pass: &mut wgpu::RenderPass<'_>,
1173 ) -> bool {
1174 if instance_count == 0 {
1175 return true;
1176 }
1177 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1178 return false;
1179 };
1180 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1181 label: None,
1182 layout: &self.bind_group_layouts.instances,
1183 entries: &[wgpu::BindGroupEntry {
1184 binding: 0,
1185 resource: self.instance_binding(offset, size),
1186 }],
1187 });
1188 pass.set_pipeline(pipeline);
1189 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1190 pass.set_bind_group(1, &bind_group, &[]);
1191 pass.draw(0..4, 0..instance_count);
1192 true
1193 }
1194
1195 fn draw_instances_with_texture(
1196 &self,
1197 data: &[u8],
1198 instance_count: u32,
1199 texture_view: &wgpu::TextureView,
1200 pipeline: &wgpu::RenderPipeline,
1201 instance_offset: &mut u64,
1202 pass: &mut wgpu::RenderPass<'_>,
1203 ) -> bool {
1204 if instance_count == 0 {
1205 return true;
1206 }
1207 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1208 return false;
1209 };
1210 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1211 label: None,
1212 layout: &self.bind_group_layouts.instances_with_texture,
1213 entries: &[
1214 wgpu::BindGroupEntry {
1215 binding: 0,
1216 resource: self.instance_binding(offset, size),
1217 },
1218 wgpu::BindGroupEntry {
1219 binding: 1,
1220 resource: wgpu::BindingResource::TextureView(texture_view),
1221 },
1222 wgpu::BindGroupEntry {
1223 binding: 2,
1224 resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1225 },
1226 ],
1227 });
1228 pass.set_pipeline(pipeline);
1229 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1230 pass.set_bind_group(1, &bind_group, &[]);
1231 pass.draw(0..4, 0..instance_count);
1232 true
1233 }
1234
    /// Reinterprets a slice of instance structs as raw bytes for upload into
    /// the GPU instance buffer.
    ///
    /// # Safety
    ///
    /// The returned slice views the memory of `instances` as plain bytes, so
    /// `T` must be valid to read as bytes (in particular it should contain no
    /// uninitialized padding). NOTE(review): the instance types passed here
    /// are `#[repr(C)]` GPU primitives — confirm each is padding-free.
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        // SAFETY: pointer and length come from a valid slice; the caller
        // guarantees `T`'s bytes are valid to read as `u8`.
        unsafe {
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1243
1244 fn draw_paths_from_intermediate(
1245 &self,
1246 paths: &[Path<ScaledPixels>],
1247 instance_offset: &mut u64,
1248 pass: &mut wgpu::RenderPass<'_>,
1249 ) -> bool {
1250 let first_path = &paths[0];
1251 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1252 {
1253 paths
1254 .iter()
1255 .map(|p| PathSprite {
1256 bounds: p.clipped_bounds(),
1257 })
1258 .collect()
1259 } else {
1260 let mut bounds = first_path.clipped_bounds();
1261 for path in paths.iter().skip(1) {
1262 bounds = bounds.union(&path.clipped_bounds());
1263 }
1264 vec![PathSprite { bounds }]
1265 };
1266
1267 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1268 self.draw_instances_with_texture(
1269 sprite_data,
1270 sprites.len() as u32,
1271 &self.path_intermediate_view,
1272 &self.pipelines.paths,
1273 instance_offset,
1274 pass,
1275 )
1276 }
1277
    /// Rasterizes a batch of paths into the intermediate offscreen texture
    /// (resolving MSAA when a multisampled view exists), to be composited
    /// later by `draw_paths_from_intermediate`.
    ///
    /// Returns `false` only when the vertex data does not fit in the
    /// instance buffer.
    fn draw_paths_to_intermediate(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
    ) -> bool {
        // Flatten all paths into one vertex list; each vertex carries its
        // path's color and clipped bounds so a single draw call suffices.
        let mut vertices = Vec::new();
        for path in paths {
            let bounds = path.clipped_bounds();
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds,
            }));
        }

        // Nothing to rasterize; report success without touching the buffer.
        if vertices.is_empty() {
            return true;
        }

        let vertex_data = unsafe { Self::instance_bytes(&vertices) };
        let Some((vertex_offset, vertex_size)) =
            self.write_to_instance_buffer(instance_offset, vertex_data)
        else {
            return false;
        };

        let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_rasterization_bind_group"),
            layout: &self.bind_group_layouts.instances,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: self.instance_binding(vertex_offset, vertex_size),
            }],
        });

        // With MSAA, render into the multisampled view and resolve into the
        // intermediate texture; otherwise render into it directly.
        let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
            (msaa_view, Some(&self.path_intermediate_view))
        } else {
            (&self.path_intermediate_view, None)
        };

        // Scope the render pass so it ends (and is recorded into `encoder`)
        // before this function returns.
        {
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("path_rasterization_pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target_view,
                    resolve_target,
                    ops: wgpu::Operations {
                        // Clear to transparent so untouched texels contribute
                        // nothing when the intermediate is sampled later.
                        load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                        store: wgpu::StoreOp::Store,
                    },
                    depth_slice: None,
                })],
                depth_stencil_attachment: None,
                ..Default::default()
            });

            pass.set_pipeline(&self.pipelines.path_rasterization);
            pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
            pass.set_bind_group(1, &data_bind_group, &[]);
            // One vertex per entry; no instancing for path rasterization.
            pass.draw(0..vertices.len() as u32, 0..1);
        }

        true
    }
1345
1346 fn grow_instance_buffer(&mut self) {
1347 let new_capacity = self.instance_buffer_capacity * 2;
1348 log::info!("increased instance buffer size to {}", new_capacity);
1349 self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1350 label: Some("instance_buffer"),
1351 size: new_capacity,
1352 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1353 mapped_at_creation: false,
1354 });
1355 self.instance_buffer_capacity = new_capacity;
1356 }
1357
1358 fn write_to_instance_buffer(
1359 &self,
1360 instance_offset: &mut u64,
1361 data: &[u8],
1362 ) -> Option<(u64, NonZeroU64)> {
1363 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1364 let size = (data.len() as u64).max(16);
1365 if offset + size > self.instance_buffer_capacity {
1366 return None;
1367 }
1368 self.queue.write_buffer(&self.instance_buffer, offset, data);
1369 *instance_offset = offset + size;
1370 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1371 }
1372
1373 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1374 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1375 buffer: &self.instance_buffer,
1376 offset,
1377 size: Some(size),
1378 })
1379 }
1380
    /// Releases renderer resources. Currently a no-op because wgpu objects
    /// free themselves when dropped; presumably kept for parity with other
    /// renderer backends — TODO confirm against the callers.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1384}
1385
/// Rendering tuning parameters resolved once from the adapter's capabilities
/// and optional `ZED_FONTS_*` environment-variable overrides.
struct RenderingParameters {
    // MSAA sample count used for path rasterization (1 = no multisampling).
    path_sample_count: u32,
    // Gamma-correction ratios computed by `get_gamma_correction_ratios`.
    gamma_ratios: [f32; 4],
    // Extra contrast for grayscale-antialiased text (>= 0).
    grayscale_enhanced_contrast: f32,
    // Extra contrast for subpixel-antialiased text (>= 0).
    subpixel_enhanced_contrast: f32,
}
1392
1393impl RenderingParameters {
1394 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1395 use std::env;
1396
1397 let format_features = adapter.get_texture_format_features(surface_format);
1398 let path_sample_count = [4, 2, 1]
1399 .into_iter()
1400 .find(|&n| format_features.flags.sample_count_supported(n))
1401 .unwrap_or(1);
1402
1403 let gamma = env::var("ZED_FONTS_GAMMA")
1404 .ok()
1405 .and_then(|v| v.parse().ok())
1406 .unwrap_or(1.8_f32)
1407 .clamp(1.0, 2.2);
1408 let gamma_ratios = get_gamma_correction_ratios(gamma);
1409
1410 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1411 .ok()
1412 .and_then(|v| v.parse().ok())
1413 .unwrap_or(1.0_f32)
1414 .max(0.0);
1415
1416 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1417 .ok()
1418 .and_then(|v| v.parse().ok())
1419 .unwrap_or(0.5_f32)
1420 .max(0.0);
1421
1422 Self {
1423 path_sample_count,
1424 gamma_ratios,
1425 grayscale_enhanced_contrast,
1426 subpixel_enhanced_contrast,
1427 }
1428 }
1429}