1use crate::{WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use log::warn;
9#[cfg(not(target_family = "wasm"))]
10use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
11use std::num::NonZeroU64;
12use std::sync::Arc;
13
/// Per-frame globals written to the `globals` uniform buffer each frame (see
/// `draw`). Field order and padding mirror the corresponding WGSL uniform
/// struct — NOTE(review): confirm against `shaders.wgsl` before reordering.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    /// Viewport size in device pixels (width, height).
    viewport_size: [f32; 2],
    /// 1 when the surface composites with premultiplied alpha, 0 otherwise.
    premultiplied_alpha: u32,
    /// Explicit padding so the struct is 16 bytes.
    pad: u32,
}
21
/// A rectangle (origin + size) flattened to raw `f32` pairs so it is `Pod`
/// and can be written directly into GPU buffers.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    /// Top-left corner, in scaled pixels.
    origin: [f32; 2],
    /// Width and height, in scaled pixels.
    size: [f32; 2],
}
28
29impl From<Bounds<ScaledPixels>> for PodBounds {
30 fn from(bounds: Bounds<ScaledPixels>) -> Self {
31 Self {
32 origin: [bounds.origin.x.0, bounds.origin.y.0],
33 size: [bounds.size.width.0, bounds.size.height.0],
34 }
35 }
36}
37
/// Per-surface uniform data for the `surfaces` pipeline (bound via the
/// `surfaces` bind group layout).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    /// Where on screen the surface is drawn.
    bounds: PodBounds,
    /// Clipping rectangle applied to the surface.
    content_mask: PodBounds,
}
44
/// Text gamma-correction parameters, uploaded once per frame next to the
/// frame globals (binding 1 of the globals bind group).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    /// Ratios produced by `get_gamma_correction_ratios`.
    gamma_ratios: [f32; 4],
    /// Contrast enhancement for grayscale-antialiased glyphs.
    grayscale_enhanced_contrast: f32,
    /// Contrast enhancement for subpixel-antialiased glyphs.
    subpixel_enhanced_contrast: f32,
    /// Padding so the struct size is a multiple of 16 bytes.
    _pad: [f32; 2],
}
53
/// Instance data for compositing one rasterized path; `#[repr(C)]` so it can
/// be uploaded byte-wise as instance data.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    /// Screen-space bounds of the path being composited.
    bounds: Bounds<ScaledPixels>,
}
59
/// Vertex layout for the path-rasterization pass; `#[repr(C)]` so vertices can
/// be uploaded byte-wise.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    /// Vertex position in scaled pixels.
    xy_position: Point<ScaledPixels>,
    /// Texture-space (s, t) coordinate for the curve evaluation.
    st_position: Point<f32>,
    /// Fill color/background of the path.
    color: Background,
    /// Bounds of the whole path this vertex belongs to.
    bounds: Bounds<ScaledPixels>,
}
68
/// Caller-requested configuration for a new render surface.
pub struct WgpuSurfaceConfig {
    /// Initial drawable size in device pixels (clamped to the device's
    /// maximum texture dimension during construction).
    pub size: Size<DevicePixels>,
    /// Whether the window should composite with transparency.
    pub transparent: bool,
}
73
/// One render pipeline per primitive kind drawn by the renderer.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    /// Renders path geometry into the intermediate (possibly MSAA) texture.
    path_rasterization: wgpu::RenderPipeline,
    /// Composites rasterized paths from the intermediate texture to the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    /// Only present when the adapter supports dual-source blending.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    /// Built but currently unused (surface batches are a no-op here).
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
86
/// Bind group layouts shared by all pipelines.
struct WgpuBindGroupLayouts {
    /// Group 0: frame globals (binding 0) + gamma params (binding 1).
    globals: wgpu::BindGroupLayout,
    /// Group 1: read-only storage buffer of instance data.
    instances: wgpu::BindGroupLayout,
    /// Group 1: instance storage buffer + texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    /// Group 1 for the surfaces pipeline: uniform + two textures + sampler.
    surfaces: wgpu::BindGroupLayout,
}
93
/// A GPUI scene renderer backed by wgpu, presenting into a window surface.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    /// Sprite atlas shared with the text/image systems.
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    /// Single uniform buffer holding: frame globals at 0, path globals at
    /// `path_globals_offset`, gamma params at `gamma_offset` (all aligned to
    /// `min_uniform_buffer_offset_alignment`).
    globals_buffer: wgpu::Buffer,
    path_globals_offset: u64,
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    /// Storage buffer for per-primitive instance data; grown on overflow.
    instance_buffer: wgpu::Buffer,
    instance_buffer_capacity: u64,
    /// Device limit that caps instance-buffer growth.
    max_buffer_size: u64,
    storage_buffer_alignment: u64,
    /// Offscreen target for path rasterization; created lazily in draw()
    /// and invalidated on resize.
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    /// MSAA counterpart, only present when path_sample_count > 1.
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    /// Alpha modes resolved once at startup so transparency can be toggled
    /// without re-querying surface capabilities.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    max_texture_size: u32,
}
123
124impl WgpuRenderer {
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// Reuses `gpu_context` if one already exists (after verifying it can
    /// present to the new surface); otherwise creates a context and stores it
    /// back into `gpu_context` for subsequent windows.
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    #[cfg(not(target_family = "wasm"))]
    pub fn new<W: HasWindowHandle + HasDisplayHandle>(
        gpu_context: &mut Option<WgpuContext>,
        window: &W,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        // Raw handles are required to build the unsafe surface target below.
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
        let display_handle = window
            .display_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            raw_display_handle: display_handle.as_raw(),
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        let instance = gpu_context
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(WgpuContext::instance);

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        let context = match gpu_context {
            Some(context) => {
                // An existing context must be able to present to this surface.
                context.check_compatible_with_surface(&surface)?;
                context
            }
            // First window: build the shared context and cache it for later windows.
            None => gpu_context.insert(WgpuContext::new(instance, &surface)?),
        };

        Self::new_with_surface(context, surface, config)
    }
175
176 #[cfg(target_family = "wasm")]
177 pub fn new_from_canvas(
178 context: &WgpuContext,
179 canvas: &web_sys::HtmlCanvasElement,
180 config: WgpuSurfaceConfig,
181 ) -> anyhow::Result<Self> {
182 let surface = context
183 .instance
184 .create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))
185 .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?;
186 Self::new_with_surface(context, surface, config)
187 }
188
    /// Finishes construction from an already-created surface: negotiates a
    /// surface format and alpha modes, configures the surface, and builds all
    /// pipelines, buffers, and bind groups.
    pub fn new_with_surface(
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer well-known non-sRGB 8-bit formats.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            // Fall back to any non-sRGB format, then to whatever the surface lists first.
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first supported alpha mode from an ordered preference list,
        // falling back to whatever the surface reports first.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        // Resolve both modes up front so update_transparency() can toggle
        // later without re-querying surface capabilities.
        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to the device limit; configuring an
        // oversized surface is invalid.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            // A zero-sized configuration is invalid, so floor at 1.
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Pack frame globals, path globals, and gamma params into one buffer,
        // each section aligned to the device's uniform-offset requirement.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let max_buffer_size = device.limits().max_buffer_size;
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        // 2 MiB starting capacity; grown on demand when a frame overflows it.
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Two bind groups over the same buffer: binding 0 differs (frame
        // globals vs. path globals), binding 1 (gamma) is shared.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        Ok(Self {
            device,
            queue,
            surface,
            surface_config,
            pipelines,
            bind_group_layouts,
            atlas,
            atlas_sampler,
            globals_buffer,
            path_globals_offset,
            gamma_offset,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            max_buffer_size,
            storage_buffer_alignment,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
        })
    }
396
    /// Builds the four bind group layouts shared by all pipelines: frame
    /// globals, bare instance storage, instance storage + texture/sampler,
    /// and the surfaces layout (uniform + two textures + sampler).
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    // Binding 0: GlobalParams uniform, read in both stages.
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    // Binding 1: GammaParams uniform, fragment-only.
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer entry used by every instance layout.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // Instances plus a sampled texture (e.g. the sprite atlas) and sampler.
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // Surfaces pipeline: SurfaceParams uniform, two planes of texture data,
        // and a sampler (all fragment-only except the uniform).
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
520
    /// Compiles the shaders and builds every render pipeline.
    ///
    /// The surface blend state depends on `alpha_mode` (premultiplied vs.
    /// straight alpha); path rasterization and the path composite pass use
    /// their own blend states, and subpixel sprites are only built when
    /// `dual_source_blending` is available.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let base_shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(base_shader_source)),
        });

        // The subpixel shader needs the dual_source_blending WGSL extension,
        // so it is compiled as a separate module with the enable directive
        // prepended, and only when the feature is supported.
        let subpixel_shader_source = include_str!("shaders_subpixel.wgsl");
        let subpixel_shader_module = if dual_source_blending {
            let combined = format!(
                "enable dual_source_blending;\n{base_shader_source}\n{subpixel_shader_source}"
            );
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("gpui_subpixel_shaders"),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Owned(combined)),
            }))
        } else {
            None
        };

        // Match the blend math to how the compositor interprets our alpha.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Shared pipeline builder: all pipelines use two bind groups
        // (globals + per-primitive data), no vertex buffers, no depth.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32,
                               module: &wgpu::ShaderModule| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module,
                    entry_point: Some(vs_entry),
                    // Vertex data comes from storage buffers, not vertex buffers.
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Renders path geometry into the (possibly multisampled) intermediate
        // texture; always premultiplied regardless of the surface alpha mode.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
            &shader_module,
        );

        // Composite blend for copying rasterized paths onto the frame:
        // source color is already premultiplied, alpha accumulates additively.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
            &shader_module,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Subpixel text needs per-channel blend factors (Src1/OneMinusSrc1),
        // which requires dual-source blending; color writes only, alpha is
        // preserved from the destination mask-wise.
        let subpixel_sprites = if let Some(subpixel_module) = &subpixel_shader_module {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
                subpixel_module,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
            &shader_module,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
773
774 fn create_path_intermediate(
775 device: &wgpu::Device,
776 format: wgpu::TextureFormat,
777 width: u32,
778 height: u32,
779 ) -> (wgpu::Texture, wgpu::TextureView) {
780 let texture = device.create_texture(&wgpu::TextureDescriptor {
781 label: Some("path_intermediate"),
782 size: wgpu::Extent3d {
783 width: width.max(1),
784 height: height.max(1),
785 depth_or_array_layers: 1,
786 },
787 mip_level_count: 1,
788 sample_count: 1,
789 dimension: wgpu::TextureDimension::D2,
790 format,
791 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
792 view_formats: &[],
793 });
794 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
795 (texture, view)
796 }
797
798 fn create_msaa_if_needed(
799 device: &wgpu::Device,
800 format: wgpu::TextureFormat,
801 width: u32,
802 height: u32,
803 sample_count: u32,
804 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
805 if sample_count <= 1 {
806 return None;
807 }
808 let texture = device.create_texture(&wgpu::TextureDescriptor {
809 label: Some("path_msaa"),
810 size: wgpu::Extent3d {
811 width: width.max(1),
812 height: height.max(1),
813 depth_or_array_layers: 1,
814 },
815 mip_level_count: 1,
816 sample_count,
817 dimension: wgpu::TextureDimension::D2,
818 format,
819 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
820 view_formats: &[],
821 });
822 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
823 Some((texture, view))
824 }
825
    /// Resizes the presentation surface. No-op when the size is unchanged.
    ///
    /// Order matters here: wait for in-flight GPU work, destroy the old
    /// intermediate textures, reconfigure the surface, then leave the
    /// intermediates invalidated for lazy recreation in `draw()`.
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        let width = size.width.0 as u32;
        let height = size.height.0 as u32;

        if width != self.surface_config.width || height != self.surface_config.height {
            // Clamp to the device limit, mirroring the constructor's behavior.
            let clamped_width = width.min(self.max_texture_size);
            let clamped_height = height.min(self.max_texture_size);

            if clamped_width != width || clamped_height != height {
                warn!(
                    "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                    Clamping to ({}, {}). Window content may not fill the entire window.",
                    width, height, self.max_texture_size, clamped_width, clamped_height
                );
            }

            // Wait for any in-flight GPU work to complete before destroying textures
            if let Err(e) = self.device.poll(wgpu::PollType::Wait {
                submission_index: None,
                timeout: None,
            }) {
                warn!("Failed to poll device during resize: {e:?}");
            }

            // Destroy old textures before allocating new ones to avoid GPU memory spikes
            if let Some(ref texture) = self.path_intermediate_texture {
                texture.destroy();
            }
            if let Some(ref texture) = self.path_msaa_texture {
                texture.destroy();
            }

            // A zero-sized surface configuration is invalid, so floor at 1.
            self.surface_config.width = clamped_width.max(1);
            self.surface_config.height = clamped_height.max(1);
            self.surface.configure(&self.device, &self.surface_config);

            // Invalidate intermediate textures - they will be lazily recreated
            // in draw() after we confirm the surface is healthy. This avoids
            // panics when the device/surface is in an invalid state during resize.
            self.path_intermediate_texture = None;
            self.path_intermediate_view = None;
            self.path_msaa_texture = None;
            self.path_msaa_view = None;
        }
    }
871
872 fn ensure_intermediate_textures(&mut self) {
873 if self.path_intermediate_texture.is_some() {
874 return;
875 }
876
877 let (path_intermediate_texture, path_intermediate_view) = {
878 let (t, v) = Self::create_path_intermediate(
879 &self.device,
880 self.surface_config.format,
881 self.surface_config.width,
882 self.surface_config.height,
883 );
884 (Some(t), Some(v))
885 };
886 self.path_intermediate_texture = path_intermediate_texture;
887 self.path_intermediate_view = path_intermediate_view;
888
889 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
890 &self.device,
891 self.surface_config.format,
892 self.surface_config.width,
893 self.surface_config.height,
894 self.rendering_params.path_sample_count,
895 )
896 .map(|(t, v)| (Some(t), Some(v)))
897 .unwrap_or((None, None));
898 self.path_msaa_texture = path_msaa_texture;
899 self.path_msaa_view = path_msaa_view;
900 }
901
902 pub fn update_transparency(&mut self, transparent: bool) {
903 let new_alpha_mode = if transparent {
904 self.transparent_alpha_mode
905 } else {
906 self.opaque_alpha_mode
907 };
908
909 if new_alpha_mode != self.surface_config.alpha_mode {
910 self.surface_config.alpha_mode = new_alpha_mode;
911 self.surface.configure(&self.device, &self.surface_config);
912 self.pipelines = Self::create_pipelines(
913 &self.device,
914 &self.bind_group_layouts,
915 self.surface_config.format,
916 self.surface_config.alpha_mode,
917 self.rendering_params.path_sample_count,
918 self.dual_source_blending,
919 );
920 }
921 }
922
923 #[allow(dead_code)]
924 pub fn viewport_size(&self) -> Size<DevicePixels> {
925 Size {
926 width: DevicePixels(self.surface_config.width as i32),
927 height: DevicePixels(self.surface_config.height as i32),
928 }
929 }
930
    /// The sprite atlas shared with text/image rasterization.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
934
    /// Whether the adapter supports dual-source blending (required for the
    /// subpixel-text pipeline).
    pub fn supports_dual_source_blending(&self) -> bool {
        self.dual_source_blending
    }
938
939 pub fn gpu_specs(&self) -> GpuSpecs {
940 GpuSpecs {
941 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
942 device_name: self.adapter_info.name.clone(),
943 driver_name: self.adapter_info.driver.clone(),
944 driver_info: self.adapter_info.driver_info.clone(),
945 }
946 }
947
    /// The device's maximum 2D texture dimension, cached at construction.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
951
    /// Renders a scene into the next surface frame and presents it.
    ///
    /// Drops the frame (without presenting) when the surface is lost/outdated
    /// — it is reconfigured and the caller is expected to redraw. If the
    /// instance buffer overflows mid-frame, the encoder is discarded, the
    /// buffer is grown, and the whole frame is re-encoded from scratch.
    pub fn draw(&mut self, scene: &Scene) {
        self.atlas.before_frame();

        let frame = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                // Reconfigure and skip this frame; the next draw will retry.
                self.surface.configure(&self.device, &self.surface_config);
                return;
            }
            Err(e) => {
                log::error!("Failed to acquire surface texture: {e}");
                return;
            }
        };

        // Now that we know the surface is healthy, ensure intermediate textures exist
        self.ensure_intermediate_textures();

        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // Path rasterization always writes premultiplied output, regardless
        // of the surface's alpha mode.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload all three uniform sections of the globals buffer.
        self.queue
            .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
        self.queue.write_buffer(
            &self.globals_buffer,
            self.path_globals_offset,
            bytemuck::bytes_of(&path_globals),
        );
        self.queue.write_buffer(
            &self.globals_buffer,
            self.gamma_offset,
            bytemuck::bytes_of(&gamma_params),
        );

        // Retry loop: each iteration encodes the full frame; on instance-buffer
        // overflow the buffer is grown and encoding restarts.
        loop {
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder = self
                .device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("main_encoder"),
                });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw_* returns false on instance-buffer overflow.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                continue;
                            }

                            // Paths need their own pass targeting the
                            // intermediate texture, so suspend the main pass...
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            // ...and resume it with Load so prior batches are kept.
                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Discard this frame's partially-encoded commands.
                drop(encoder);
                if self.instance_buffer_capacity >= self.max_buffer_size {
                    // Can't grow further; present whatever was cleared and bail.
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.queue.submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1147
1148 fn draw_quads(
1149 &self,
1150 quads: &[Quad],
1151 instance_offset: &mut u64,
1152 pass: &mut wgpu::RenderPass<'_>,
1153 ) -> bool {
1154 let data = unsafe { Self::instance_bytes(quads) };
1155 self.draw_instances(
1156 data,
1157 quads.len() as u32,
1158 &self.pipelines.quads,
1159 instance_offset,
1160 pass,
1161 )
1162 }
1163
1164 fn draw_shadows(
1165 &self,
1166 shadows: &[Shadow],
1167 instance_offset: &mut u64,
1168 pass: &mut wgpu::RenderPass<'_>,
1169 ) -> bool {
1170 let data = unsafe { Self::instance_bytes(shadows) };
1171 self.draw_instances(
1172 data,
1173 shadows.len() as u32,
1174 &self.pipelines.shadows,
1175 instance_offset,
1176 pass,
1177 )
1178 }
1179
1180 fn draw_underlines(
1181 &self,
1182 underlines: &[Underline],
1183 instance_offset: &mut u64,
1184 pass: &mut wgpu::RenderPass<'_>,
1185 ) -> bool {
1186 let data = unsafe { Self::instance_bytes(underlines) };
1187 self.draw_instances(
1188 data,
1189 underlines.len() as u32,
1190 &self.pipelines.underlines,
1191 instance_offset,
1192 pass,
1193 )
1194 }
1195
1196 fn draw_monochrome_sprites(
1197 &self,
1198 sprites: &[MonochromeSprite],
1199 texture_id: AtlasTextureId,
1200 instance_offset: &mut u64,
1201 pass: &mut wgpu::RenderPass<'_>,
1202 ) -> bool {
1203 let tex_info = self.atlas.get_texture_info(texture_id);
1204 let data = unsafe { Self::instance_bytes(sprites) };
1205 self.draw_instances_with_texture(
1206 data,
1207 sprites.len() as u32,
1208 &tex_info.view,
1209 &self.pipelines.mono_sprites,
1210 instance_offset,
1211 pass,
1212 )
1213 }
1214
1215 fn draw_subpixel_sprites(
1216 &self,
1217 sprites: &[SubpixelSprite],
1218 texture_id: AtlasTextureId,
1219 instance_offset: &mut u64,
1220 pass: &mut wgpu::RenderPass<'_>,
1221 ) -> bool {
1222 let tex_info = self.atlas.get_texture_info(texture_id);
1223 let data = unsafe { Self::instance_bytes(sprites) };
1224 let pipeline = self
1225 .pipelines
1226 .subpixel_sprites
1227 .as_ref()
1228 .unwrap_or(&self.pipelines.mono_sprites);
1229 self.draw_instances_with_texture(
1230 data,
1231 sprites.len() as u32,
1232 &tex_info.view,
1233 pipeline,
1234 instance_offset,
1235 pass,
1236 )
1237 }
1238
1239 fn draw_polychrome_sprites(
1240 &self,
1241 sprites: &[PolychromeSprite],
1242 texture_id: AtlasTextureId,
1243 instance_offset: &mut u64,
1244 pass: &mut wgpu::RenderPass<'_>,
1245 ) -> bool {
1246 let tex_info = self.atlas.get_texture_info(texture_id);
1247 let data = unsafe { Self::instance_bytes(sprites) };
1248 self.draw_instances_with_texture(
1249 data,
1250 sprites.len() as u32,
1251 &tex_info.view,
1252 &self.pipelines.poly_sprites,
1253 instance_offset,
1254 pass,
1255 )
1256 }
1257
1258 fn draw_instances(
1259 &self,
1260 data: &[u8],
1261 instance_count: u32,
1262 pipeline: &wgpu::RenderPipeline,
1263 instance_offset: &mut u64,
1264 pass: &mut wgpu::RenderPass<'_>,
1265 ) -> bool {
1266 if instance_count == 0 {
1267 return true;
1268 }
1269 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1270 return false;
1271 };
1272 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1273 label: None,
1274 layout: &self.bind_group_layouts.instances,
1275 entries: &[wgpu::BindGroupEntry {
1276 binding: 0,
1277 resource: self.instance_binding(offset, size),
1278 }],
1279 });
1280 pass.set_pipeline(pipeline);
1281 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1282 pass.set_bind_group(1, &bind_group, &[]);
1283 pass.draw(0..4, 0..instance_count);
1284 true
1285 }
1286
1287 fn draw_instances_with_texture(
1288 &self,
1289 data: &[u8],
1290 instance_count: u32,
1291 texture_view: &wgpu::TextureView,
1292 pipeline: &wgpu::RenderPipeline,
1293 instance_offset: &mut u64,
1294 pass: &mut wgpu::RenderPass<'_>,
1295 ) -> bool {
1296 if instance_count == 0 {
1297 return true;
1298 }
1299 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1300 return false;
1301 };
1302 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1303 label: None,
1304 layout: &self.bind_group_layouts.instances_with_texture,
1305 entries: &[
1306 wgpu::BindGroupEntry {
1307 binding: 0,
1308 resource: self.instance_binding(offset, size),
1309 },
1310 wgpu::BindGroupEntry {
1311 binding: 1,
1312 resource: wgpu::BindingResource::TextureView(texture_view),
1313 },
1314 wgpu::BindGroupEntry {
1315 binding: 2,
1316 resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1317 },
1318 ],
1319 });
1320 pass.set_pipeline(pipeline);
1321 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1322 pass.set_bind_group(1, &bind_group, &[]);
1323 pass.draw(0..4, 0..instance_count);
1324 true
1325 }
1326
1327 unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
1328 unsafe {
1329 std::slice::from_raw_parts(
1330 instances.as_ptr() as *const u8,
1331 std::mem::size_of_val(instances),
1332 )
1333 }
1334 }
1335
1336 fn draw_paths_from_intermediate(
1337 &self,
1338 paths: &[Path<ScaledPixels>],
1339 instance_offset: &mut u64,
1340 pass: &mut wgpu::RenderPass<'_>,
1341 ) -> bool {
1342 let first_path = &paths[0];
1343 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1344 {
1345 paths
1346 .iter()
1347 .map(|p| PathSprite {
1348 bounds: p.clipped_bounds(),
1349 })
1350 .collect()
1351 } else {
1352 let mut bounds = first_path.clipped_bounds();
1353 for path in paths.iter().skip(1) {
1354 bounds = bounds.union(&path.clipped_bounds());
1355 }
1356 vec![PathSprite { bounds }]
1357 };
1358
1359 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1360 return true;
1361 };
1362
1363 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1364 self.draw_instances_with_texture(
1365 sprite_data,
1366 sprites.len() as u32,
1367 path_intermediate_view,
1368 &self.pipelines.paths,
1369 instance_offset,
1370 pass,
1371 )
1372 }
1373
1374 fn draw_paths_to_intermediate(
1375 &self,
1376 encoder: &mut wgpu::CommandEncoder,
1377 paths: &[Path<ScaledPixels>],
1378 instance_offset: &mut u64,
1379 ) -> bool {
1380 let mut vertices = Vec::new();
1381 for path in paths {
1382 let bounds = path.clipped_bounds();
1383 vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
1384 xy_position: v.xy_position,
1385 st_position: v.st_position,
1386 color: path.color,
1387 bounds,
1388 }));
1389 }
1390
1391 if vertices.is_empty() {
1392 return true;
1393 }
1394
1395 let vertex_data = unsafe { Self::instance_bytes(&vertices) };
1396 let Some((vertex_offset, vertex_size)) =
1397 self.write_to_instance_buffer(instance_offset, vertex_data)
1398 else {
1399 return false;
1400 };
1401
1402 let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1403 label: Some("path_rasterization_bind_group"),
1404 layout: &self.bind_group_layouts.instances,
1405 entries: &[wgpu::BindGroupEntry {
1406 binding: 0,
1407 resource: self.instance_binding(vertex_offset, vertex_size),
1408 }],
1409 });
1410
1411 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1412 return true;
1413 };
1414
1415 let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
1416 (msaa_view, Some(path_intermediate_view))
1417 } else {
1418 (path_intermediate_view, None)
1419 };
1420
1421 {
1422 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1423 label: Some("path_rasterization_pass"),
1424 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1425 view: target_view,
1426 resolve_target,
1427 ops: wgpu::Operations {
1428 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1429 store: wgpu::StoreOp::Store,
1430 },
1431 depth_slice: None,
1432 })],
1433 depth_stencil_attachment: None,
1434 ..Default::default()
1435 });
1436
1437 pass.set_pipeline(&self.pipelines.path_rasterization);
1438 pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
1439 pass.set_bind_group(1, &data_bind_group, &[]);
1440 pass.draw(0..vertices.len() as u32, 0..1);
1441 }
1442
1443 true
1444 }
1445
1446 fn grow_instance_buffer(&mut self) {
1447 let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
1448 log::info!("increased instance buffer size to {}", new_capacity);
1449 self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1450 label: Some("instance_buffer"),
1451 size: new_capacity,
1452 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1453 mapped_at_creation: false,
1454 });
1455 self.instance_buffer_capacity = new_capacity;
1456 }
1457
1458 fn write_to_instance_buffer(
1459 &self,
1460 instance_offset: &mut u64,
1461 data: &[u8],
1462 ) -> Option<(u64, NonZeroU64)> {
1463 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1464 let size = (data.len() as u64).max(16);
1465 if offset + size > self.instance_buffer_capacity {
1466 return None;
1467 }
1468 self.queue.write_buffer(&self.instance_buffer, offset, data);
1469 *instance_offset = offset + size;
1470 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1471 }
1472
    /// Builds a buffer binding for the sub-range of the shared instance
    /// buffer previously reserved by `write_to_instance_buffer`.
    fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
        wgpu::BindingResource::Buffer(wgpu::BufferBinding {
            buffer: &self.instance_buffer,
            offset,
            size: Some(size),
        })
    }
1480
    /// Tears down the renderer. Intentionally a no-op: the wgpu objects held
    /// by `self` are released when the struct itself is dropped.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1484}
1485
/// Rendering options resolved once from the adapter's texture-format
/// capabilities and the `ZED_FONTS_*` environment variables (see
/// `RenderingParameters::new`).
struct RenderingParameters {
    // MSAA sample count for path rasterization: the highest of 4/2/1 that the
    // surface format supports.
    path_sample_count: u32,
    // Gamma-correction ratios derived from `ZED_FONTS_GAMMA` via
    // `get_gamma_correction_ratios`.
    gamma_ratios: [f32; 4],
    // Non-negative contrast boost from `ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST`.
    grayscale_enhanced_contrast: f32,
    // Non-negative contrast boost from `ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST`.
    subpixel_enhanced_contrast: f32,
}
1492
1493impl RenderingParameters {
1494 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1495 use std::env;
1496
1497 let format_features = adapter.get_texture_format_features(surface_format);
1498 let path_sample_count = [4, 2, 1]
1499 .into_iter()
1500 .find(|&n| format_features.flags.sample_count_supported(n))
1501 .unwrap_or(1);
1502
1503 let gamma = env::var("ZED_FONTS_GAMMA")
1504 .ok()
1505 .and_then(|v| v.parse().ok())
1506 .unwrap_or(1.8_f32)
1507 .clamp(1.0, 2.2);
1508 let gamma_ratios = get_gamma_correction_ratios(gamma);
1509
1510 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1511 .ok()
1512 .and_then(|v| v.parse().ok())
1513 .unwrap_or(1.0_f32)
1514 .max(0.0);
1515
1516 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1517 .ok()
1518 .and_then(|v| v.parse().ok())
1519 .unwrap_or(0.5_f32)
1520 .max(0.0);
1521
1522 Self {
1523 path_sample_count,
1524 gamma_ratios,
1525 grayscale_enhanced_contrast,
1526 subpixel_enhanced_contrast,
1527 }
1528 }
1529}