1#[cfg(not(target_family = "wasm"))]
2use crate::CompositorGpuHint;
3use crate::{WgpuAtlas, WgpuContext};
4use bytemuck::{Pod, Zeroable};
5use gpui::{
6 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
7 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
8 Underline, get_gamma_correction_ratios,
9};
10use log::warn;
11#[cfg(not(target_family = "wasm"))]
12use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
13use std::num::NonZeroU64;
14use std::sync::Arc;
15
/// Per-frame uniform parameters shared by the shaders.
///
/// `#[repr(C)]` plus `Pod`/`Zeroable` so the struct can be uploaded verbatim
/// into the globals uniform buffer with `bytemuck::bytes_of`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    /// Surface size in pixels as `[width, height]` (taken from the surface config).
    viewport_size: [f32; 2],
    /// 1 when the surface's composite alpha mode is `PreMultiplied`, else 0.
    premultiplied_alpha: u32,
    /// Explicit padding; keeps the struct 16 bytes for uniform-buffer layout.
    pad: u32,
}
23
/// Plain-`f32`, GPU-uploadable mirror of `Bounds<ScaledPixels>`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    /// Origin as `[x, y]`.
    origin: [f32; 2],
    /// Extent as `[width, height]`.
    size: [f32; 2],
}
30
31impl From<Bounds<ScaledPixels>> for PodBounds {
32 fn from(bounds: Bounds<ScaledPixels>) -> Self {
33 Self {
34 origin: [bounds.origin.x.0, bounds.origin.y.0],
35 size: [bounds.size.width.0, bounds.size.height.0],
36 }
37 }
38}
39
/// Uniform parameters for the `surfaces` pipeline.
///
/// NOTE(review): `content_mask` is presumably the clip rectangle applied in the
/// fragment shader, mirroring gpui's content masks — confirm against shaders.wgsl.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    /// Destination rectangle of the surface on screen.
    bounds: PodBounds,
    /// Clipping rectangle for the draw.
    content_mask: PodBounds,
}
46
/// Text-rendering gamma/contrast parameters, uploaded once per frame at
/// `gamma_offset` inside the globals buffer (see `draw`).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    /// Gamma-correction ratios (sourced from `RenderingParameters::gamma_ratios`).
    gamma_ratios: [f32; 4],
    /// Enhanced-contrast factor for grayscale-antialiased text.
    grayscale_enhanced_contrast: f32,
    /// Enhanced-contrast factor for subpixel-antialiased text.
    subpixel_enhanced_contrast: f32,
    /// Padding to a 16-byte multiple for uniform-buffer layout.
    _pad: [f32; 2],
}
55
/// Per-instance data for compositing a rasterized path from the intermediate
/// texture onto the frame; holds only the path's bounds in scaled pixels.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}
61
/// Vertex emitted for the path-rasterization pass (`path_rasterization`
/// pipeline), which renders paths into the intermediate texture.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    /// Position in scaled pixels.
    xy_position: Point<ScaledPixels>,
    /// Parametric coordinates within the path.
    st_position: Point<f32>,
    /// Fill for the path.
    color: Background,
    /// Bounds of the whole path this vertex belongs to.
    bounds: Bounds<ScaledPixels>,
}
70
/// Surface parameters requested by the windowing layer at renderer creation.
pub struct WgpuSurfaceConfig {
    /// Initial drawable size in device pixels (clamped to the device's max
    /// texture dimension during construction).
    pub size: Size<DevicePixels>,
    /// Whether the window wants a transparent (premultiplied/inherit) alpha mode.
    pub transparent: bool,
}
75
/// One render pipeline per primitive kind drawn by `draw`.
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    /// Renders path geometry into the intermediate (possibly MSAA) texture.
    path_rasterization: wgpu::RenderPipeline,
    /// Composites rasterized paths from the intermediate texture to the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    /// Only present when the adapter supports dual-source blending.
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    /// Unused on this backend; surface batches are macOS-only (see `draw`).
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
88
/// Bind group layouts shared by the pipelines (created once at startup).
struct WgpuBindGroupLayouts {
    /// Group 0: globals uniform + gamma uniform.
    globals: wgpu::BindGroupLayout,
    /// Group 1: read-only instance storage buffer.
    instances: wgpu::BindGroupLayout,
    /// Group 1: instance storage buffer + sampled texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    /// Group 1 for the surfaces pipeline: uniform + two textures + sampler.
    surfaces: wgpu::BindGroupLayout,
}
95
/// wgpu-backed renderer for a single window surface.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    /// False after the surface is lost/outdated; configure-guard for resize
    /// and transparency changes.
    surface_configured: bool,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    /// Single uniform buffer holding globals, path globals, and gamma params
    /// at aligned offsets.
    globals_buffer: wgpu::Buffer,
    /// Byte offset of the path-pass `GlobalParams` within `globals_buffer`.
    path_globals_offset: u64,
    /// Byte offset of `GammaParams` within `globals_buffer`.
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    /// Storage buffer for per-primitive instance data; grown on overflow up to
    /// `max_buffer_size`.
    instance_buffer: wgpu::Buffer,
    instance_buffer_capacity: u64,
    max_buffer_size: u64,
    storage_buffer_alignment: u64,
    /// Lazily (re)created offscreen targets for path rasterization; None until
    /// the first healthy draw and after any resize.
    path_intermediate_texture: Option<wgpu::Texture>,
    path_intermediate_view: Option<wgpu::TextureView>,
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    /// Alpha mode used when the window is transparent.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    /// Alpha mode used when the window is opaque.
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
    /// Device limit on 2D texture dimensions; surface sizes are clamped to it.
    max_texture_size: u32,
}
126
127impl WgpuRenderer {
    /// Creates a new WgpuRenderer from raw window handles.
    ///
    /// Reuses the instance from `gpu_context` when present (creating the
    /// context lazily otherwise) and delegates to [`Self::new_with_surface`].
    ///
    /// # Safety
    /// The caller must ensure that the window handle remains valid for the lifetime
    /// of the returned renderer.
    #[cfg(not(target_family = "wasm"))]
    pub fn new<W: HasWindowHandle + HasDisplayHandle>(
        gpu_context: &mut Option<WgpuContext>,
        window: &W,
        config: WgpuSurfaceConfig,
        compositor_gpu: Option<CompositorGpuHint>,
    ) -> anyhow::Result<Self> {
        let window_handle = window
            .window_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
        let display_handle = window
            .display_handle()
            .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;

        let target = wgpu::SurfaceTargetUnsafe::RawHandle {
            raw_display_handle: display_handle.as_raw(),
            raw_window_handle: window_handle.as_raw(),
        };

        // Use the existing context's instance if available, otherwise create a new one.
        // The surface must be created with the same instance that will be used for
        // adapter selection, otherwise wgpu will panic.
        let instance = gpu_context
            .as_ref()
            .map(|ctx| ctx.instance.clone())
            .unwrap_or_else(WgpuContext::instance);

        // Safety: The caller guarantees that the window handle is valid for the
        // lifetime of this renderer. In practice, the RawWindow struct is created
        // from the native window handles and the surface is dropped before the window.
        let surface = unsafe {
            instance
                .create_surface_unsafe(target)
                .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
        };

        // An existing context must be compatible with this surface; otherwise a
        // fresh context is built from the surface and cached in `gpu_context`.
        let context = match gpu_context {
            Some(context) => {
                context.check_compatible_with_surface(&surface)?;
                context
            }
            None => gpu_context.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
        };

        Self::new_with_surface(context, surface, config)
    }
179
180 #[cfg(target_family = "wasm")]
181 pub fn new_from_canvas(
182 context: &WgpuContext,
183 canvas: &web_sys::HtmlCanvasElement,
184 config: WgpuSurfaceConfig,
185 ) -> anyhow::Result<Self> {
186 let surface = context
187 .instance
188 .create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))
189 .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?;
190 Self::new_with_surface(context, surface, config)
191 }
192
    /// Shared construction path for desktop and wasm renderers.
    ///
    /// Selects a surface format and alpha modes from the surface capabilities,
    /// configures the surface, builds pipelines and bind-group layouts, and
    /// allocates the uniform (globals/path-globals/gamma) and instance buffers.
    fn new_with_surface(
        context: &WgpuContext,
        surface: wgpu::Surface<'static>,
        config: WgpuSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface_caps = surface.get_capabilities(&context.adapter);
        // Prefer non-sRGB 8-bit formats; fall back to any non-sRGB format, then
        // to whatever format the surface reports first.
        let preferred_formats = [
            wgpu::TextureFormat::Bgra8Unorm,
            wgpu::TextureFormat::Rgba8Unorm,
        ];
        let surface_format = preferred_formats
            .iter()
            .find(|f| surface_caps.formats.contains(f))
            .copied()
            .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
            .or_else(|| surface_caps.formats.first().copied())
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "Surface reports no supported texture formats for adapter {:?}",
                    context.adapter.get_info().name
                )
            })?;

        // Picks the first alpha mode from an ordered preference list that the
        // surface supports, falling back to the surface's first reported mode.
        let pick_alpha_mode =
            |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
                preferences
                    .iter()
                    .find(|p| surface_caps.alpha_modes.contains(p))
                    .copied()
                    .or_else(|| surface_caps.alpha_modes.first().copied())
                    .ok_or_else(|| {
                        anyhow::anyhow!(
                            "Surface reports no supported alpha modes for adapter {:?}",
                            context.adapter.get_info().name
                        )
                    })
            };

        let transparent_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::PreMultiplied,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let opaque_alpha_mode = pick_alpha_mode(&[
            wgpu::CompositeAlphaMode::Opaque,
            wgpu::CompositeAlphaMode::Inherit,
        ])?;

        let alpha_mode = if config.transparent {
            transparent_alpha_mode
        } else {
            opaque_alpha_mode
        };

        let device = Arc::clone(&context.device);
        let max_texture_size = device.limits().max_texture_dimension_2d;

        // Clamp the requested size to the device's texture limit; a too-large
        // surface would fail to configure.
        let requested_width = config.size.width.0 as u32;
        let requested_height = config.size.height.0 as u32;
        let clamped_width = requested_width.min(max_texture_size);
        let clamped_height = requested_height.min(max_texture_size);

        if clamped_width != requested_width || clamped_height != requested_height {
            warn!(
                "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
                Clamping to ({}, {}). Window content may not fill the entire window.",
                requested_width, requested_height, max_texture_size, clamped_width, clamped_height
            );
        }

        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: clamped_width.max(1),
            height: clamped_height.max(1),
            present_mode: wgpu::PresentMode::Fifo,
            desired_maximum_frame_latency: 2,
            alpha_mode,
            view_formats: vec![],
        };
        // Configure the surface immediately. The adapter selection process already validated
        // that this adapter can successfully configure this surface.
        surface.configure(&context.device, &surface_config);

        let queue = Arc::clone(&context.queue);
        let dual_source_blending = context.supports_dual_source_blending();

        let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
        let bind_group_layouts = Self::create_bind_group_layouts(&device);
        let pipelines = Self::create_pipelines(
            &device,
            &bind_group_layouts,
            surface_format,
            alpha_mode,
            rendering_params.path_sample_count,
            dual_source_blending,
        );

        let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
        let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("atlas_sampler"),
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });

        // Lay out three uniform regions (globals, path globals, gamma) in one
        // buffer, each aligned to min_uniform_buffer_offset_alignment.
        let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let globals_size = std::mem::size_of::<GlobalParams>() as u64;
        let gamma_size = std::mem::size_of::<GammaParams>() as u64;
        let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
        let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);

        let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("globals_buffer"),
            size: gamma_offset + gamma_size,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let max_buffer_size = device.limits().max_buffer_size;
        let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
        let initial_instance_buffer_capacity = 2 * 1024 * 1024;
        let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("instance_buffer"),
            size: initial_instance_buffer_capacity,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        // Two globals bind groups over the same buffer: one viewing the main
        // GlobalParams region, one viewing the path-pass region. Both share the
        // gamma region at binding 1.
        let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: 0,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_globals_bind_group"),
            layout: &bind_group_layouts.globals,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: path_globals_offset,
                        size: Some(NonZeroU64::new(globals_size).unwrap()),
                    }),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                        buffer: &globals_buffer,
                        offset: gamma_offset,
                        size: Some(NonZeroU64::new(gamma_size).unwrap()),
                    }),
                },
            ],
        });

        let adapter_info = context.adapter.get_info();

        Ok(Self {
            device,
            queue,
            surface,
            surface_config,
            surface_configured: true,
            pipelines,
            bind_group_layouts,
            atlas,
            atlas_sampler,
            globals_buffer,
            path_globals_offset,
            gamma_offset,
            globals_bind_group,
            path_globals_bind_group,
            instance_buffer,
            instance_buffer_capacity: initial_instance_buffer_capacity,
            max_buffer_size,
            storage_buffer_alignment,
            // Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
            // This avoids panics when the device/surface is in an invalid state during initialization.
            path_intermediate_texture: None,
            path_intermediate_view: None,
            path_msaa_texture: None,
            path_msaa_view: None,
            rendering_params,
            dual_source_blending,
            adapter_info,
            transparent_alpha_mode,
            opaque_alpha_mode,
            max_texture_size,
        })
    }
403
    /// Creates the bind group layouts shared by all pipelines.
    ///
    /// Binding indices and visibilities here must match the WGSL shader
    /// declarations exactly (group 0 = globals, group 1 = per-batch data).
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        // Group 0: binding 0 = GlobalParams (vertex+fragment),
        //          binding 1 = GammaParams (fragment only).
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer entry shared by the instance layouts below.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        // Group 1 for untextured primitives: just the instance storage buffer.
        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // Group 1 for textured primitives: instances + atlas texture + sampler.
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // Group 1 for the surfaces pipeline: SurfaceParams uniform, two
        // sampled textures, and a sampler.
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
527
    /// Compiles the shaders and builds one render pipeline per primitive kind.
    ///
    /// All pipelines pull instance data from storage buffers, so no vertex
    /// buffers are declared. The surface-facing pipelines blend according to
    /// `alpha_mode`; path rasterization renders at `path_sample_count` samples
    /// into the intermediate texture; subpixel text is only built when the
    /// adapter supports dual-source blending.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let base_shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(base_shader_source)),
        });

        // The subpixel shader needs the `dual_source_blending` WGSL extension,
        // so it is compiled as a separate module (prefixed with the enable
        // directive) and only when the feature is available.
        let subpixel_shader_source = include_str!("shaders_subpixel.wgsl");
        let subpixel_shader_module = if dual_source_blending {
            let combined = format!(
                "enable dual_source_blending;\n{base_shader_source}\n{subpixel_shader_source}"
            );
            Some(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some("gpui_subpixel_shaders"),
                source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Owned(combined)),
            }))
        } else {
            None
        };

        // Blend to match how the compositor interprets the surface's alpha.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // Shared pipeline builder: two bind groups (globals + per-batch data),
        // no vertex buffers, no depth/stencil.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32,
                               module: &wgpu::ShaderModule| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Renders into the intermediate texture (possibly multisampled);
        // always premultiplied, independent of the surface alpha mode.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
            &shader_module,
        );

        // Composites the premultiplied intermediate texture onto the frame.
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
            &shader_module,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        // Subpixel text uses per-channel coverage via dual-source blending
        // (Src1/OneMinusSrc1); the alpha channel of the target is left as-is
        // (write mask COLOR only).
        let subpixel_sprites = if let Some(subpixel_module) = &subpixel_shader_module {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
                subpixel_module,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
            &shader_module,
        );

        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
            &shader_module,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
780
781 fn create_path_intermediate(
782 device: &wgpu::Device,
783 format: wgpu::TextureFormat,
784 width: u32,
785 height: u32,
786 ) -> (wgpu::Texture, wgpu::TextureView) {
787 let texture = device.create_texture(&wgpu::TextureDescriptor {
788 label: Some("path_intermediate"),
789 size: wgpu::Extent3d {
790 width: width.max(1),
791 height: height.max(1),
792 depth_or_array_layers: 1,
793 },
794 mip_level_count: 1,
795 sample_count: 1,
796 dimension: wgpu::TextureDimension::D2,
797 format,
798 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
799 view_formats: &[],
800 });
801 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
802 (texture, view)
803 }
804
805 fn create_msaa_if_needed(
806 device: &wgpu::Device,
807 format: wgpu::TextureFormat,
808 width: u32,
809 height: u32,
810 sample_count: u32,
811 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
812 if sample_count <= 1 {
813 return None;
814 }
815 let texture = device.create_texture(&wgpu::TextureDescriptor {
816 label: Some("path_msaa"),
817 size: wgpu::Extent3d {
818 width: width.max(1),
819 height: height.max(1),
820 depth_or_array_layers: 1,
821 },
822 mip_level_count: 1,
823 sample_count,
824 dimension: wgpu::TextureDimension::D2,
825 format,
826 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
827 view_formats: &[],
828 });
829 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
830 Some((texture, view))
831 }
832
833 pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
834 let width = size.width.0 as u32;
835 let height = size.height.0 as u32;
836
837 if width != self.surface_config.width || height != self.surface_config.height {
838 let clamped_width = width.min(self.max_texture_size);
839 let clamped_height = height.min(self.max_texture_size);
840
841 if clamped_width != width || clamped_height != height {
842 warn!(
843 "Requested surface size ({}, {}) exceeds maximum texture dimension {}. \
844 Clamping to ({}, {}). Window content may not fill the entire window.",
845 width, height, self.max_texture_size, clamped_width, clamped_height
846 );
847 }
848
849 // Wait for any in-flight GPU work to complete before destroying textures
850 if let Err(e) = self.device.poll(wgpu::PollType::Wait {
851 submission_index: None,
852 timeout: None,
853 }) {
854 warn!("Failed to poll device during resize: {e:?}");
855 }
856
857 // Destroy old textures before allocating new ones to avoid GPU memory spikes
858 if let Some(ref texture) = self.path_intermediate_texture {
859 texture.destroy();
860 }
861 if let Some(ref texture) = self.path_msaa_texture {
862 texture.destroy();
863 }
864
865 self.surface_config.width = clamped_width.max(1);
866 self.surface_config.height = clamped_height.max(1);
867 if self.surface_configured {
868 self.surface.configure(&self.device, &self.surface_config);
869 }
870
871 // Invalidate intermediate textures - they will be lazily recreated
872 // in draw() after we confirm the surface is healthy. This avoids
873 // panics when the device/surface is in an invalid state during resize.
874 self.path_intermediate_texture = None;
875 self.path_intermediate_view = None;
876 self.path_msaa_texture = None;
877 self.path_msaa_view = None;
878 }
879 }
880
881 fn ensure_intermediate_textures(&mut self) {
882 if self.path_intermediate_texture.is_some() {
883 return;
884 }
885
886 let (path_intermediate_texture, path_intermediate_view) = {
887 let (t, v) = Self::create_path_intermediate(
888 &self.device,
889 self.surface_config.format,
890 self.surface_config.width,
891 self.surface_config.height,
892 );
893 (Some(t), Some(v))
894 };
895 self.path_intermediate_texture = path_intermediate_texture;
896 self.path_intermediate_view = path_intermediate_view;
897
898 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
899 &self.device,
900 self.surface_config.format,
901 self.surface_config.width,
902 self.surface_config.height,
903 self.rendering_params.path_sample_count,
904 )
905 .map(|(t, v)| (Some(t), Some(v)))
906 .unwrap_or((None, None));
907 self.path_msaa_texture = path_msaa_texture;
908 self.path_msaa_view = path_msaa_view;
909 }
910
911 pub fn update_transparency(&mut self, transparent: bool) {
912 let new_alpha_mode = if transparent {
913 self.transparent_alpha_mode
914 } else {
915 self.opaque_alpha_mode
916 };
917
918 if new_alpha_mode != self.surface_config.alpha_mode {
919 self.surface_config.alpha_mode = new_alpha_mode;
920 if self.surface_configured {
921 self.surface.configure(&self.device, &self.surface_config);
922 }
923 self.pipelines = Self::create_pipelines(
924 &self.device,
925 &self.bind_group_layouts,
926 self.surface_config.format,
927 self.surface_config.alpha_mode,
928 self.rendering_params.path_sample_count,
929 self.dual_source_blending,
930 );
931 }
932 }
933
934 #[allow(dead_code)]
935 pub fn viewport_size(&self) -> Size<DevicePixels> {
936 Size {
937 width: DevicePixels(self.surface_config.width as i32),
938 height: DevicePixels(self.surface_config.height as i32),
939 }
940 }
941
    /// The sprite atlas backing glyph and image uploads for this renderer.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
945
    /// Whether the adapter supports dual-source blending (required for the
    /// subpixel text pipeline).
    pub fn supports_dual_source_blending(&self) -> bool {
        self.dual_source_blending
    }
949
950 pub fn gpu_specs(&self) -> GpuSpecs {
951 GpuSpecs {
952 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
953 device_name: self.adapter_info.name.clone(),
954 driver_name: self.adapter_info.driver.clone(),
955 driver_info: self.adapter_info.driver_info.clone(),
956 }
957 }
958
    /// The device's maximum 2D texture dimension; surface sizes are clamped to it.
    pub fn max_texture_size(&self) -> u32 {
        self.max_texture_size
    }
962
963 pub fn draw(&mut self, scene: &Scene) {
964 self.atlas.before_frame();
965
966 let frame = match self.surface.get_current_texture() {
967 Ok(frame) => frame,
968 Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
969 self.surface_configured = false;
970 return;
971 }
972 Err(e) => {
973 log::error!("Failed to acquire surface texture: {e}");
974 return;
975 }
976 };
977
978 // Now that we know the surface is healthy, ensure intermediate textures exist
979 self.ensure_intermediate_textures();
980
981 let frame_view = frame
982 .texture
983 .create_view(&wgpu::TextureViewDescriptor::default());
984
985 let gamma_params = GammaParams {
986 gamma_ratios: self.rendering_params.gamma_ratios,
987 grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
988 subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
989 _pad: [0.0; 2],
990 };
991
992 let globals = GlobalParams {
993 viewport_size: [
994 self.surface_config.width as f32,
995 self.surface_config.height as f32,
996 ],
997 premultiplied_alpha: if self.surface_config.alpha_mode
998 == wgpu::CompositeAlphaMode::PreMultiplied
999 {
1000 1
1001 } else {
1002 0
1003 },
1004 pad: 0,
1005 };
1006
1007 let path_globals = GlobalParams {
1008 premultiplied_alpha: 0,
1009 ..globals
1010 };
1011
1012 self.queue
1013 .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
1014 self.queue.write_buffer(
1015 &self.globals_buffer,
1016 self.path_globals_offset,
1017 bytemuck::bytes_of(&path_globals),
1018 );
1019 self.queue.write_buffer(
1020 &self.globals_buffer,
1021 self.gamma_offset,
1022 bytemuck::bytes_of(&gamma_params),
1023 );
1024
1025 loop {
1026 let mut instance_offset: u64 = 0;
1027 let mut overflow = false;
1028
1029 let mut encoder = self
1030 .device
1031 .create_command_encoder(&wgpu::CommandEncoderDescriptor {
1032 label: Some("main_encoder"),
1033 });
1034
1035 {
1036 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1037 label: Some("main_pass"),
1038 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1039 view: &frame_view,
1040 resolve_target: None,
1041 ops: wgpu::Operations {
1042 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1043 store: wgpu::StoreOp::Store,
1044 },
1045 depth_slice: None,
1046 })],
1047 depth_stencil_attachment: None,
1048 ..Default::default()
1049 });
1050
1051 for batch in scene.batches() {
1052 let ok = match batch {
1053 PrimitiveBatch::Quads(range) => {
1054 self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
1055 }
1056 PrimitiveBatch::Shadows(range) => self.draw_shadows(
1057 &scene.shadows[range],
1058 &mut instance_offset,
1059 &mut pass,
1060 ),
1061 PrimitiveBatch::Paths(range) => {
1062 let paths = &scene.paths[range];
1063 if paths.is_empty() {
1064 continue;
1065 }
1066
1067 drop(pass);
1068
1069 let did_draw = self.draw_paths_to_intermediate(
1070 &mut encoder,
1071 paths,
1072 &mut instance_offset,
1073 );
1074
1075 pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1076 label: Some("main_pass_continued"),
1077 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1078 view: &frame_view,
1079 resolve_target: None,
1080 ops: wgpu::Operations {
1081 load: wgpu::LoadOp::Load,
1082 store: wgpu::StoreOp::Store,
1083 },
1084 depth_slice: None,
1085 })],
1086 depth_stencil_attachment: None,
1087 ..Default::default()
1088 });
1089
1090 if did_draw {
1091 self.draw_paths_from_intermediate(
1092 paths,
1093 &mut instance_offset,
1094 &mut pass,
1095 )
1096 } else {
1097 false
1098 }
1099 }
1100 PrimitiveBatch::Underlines(range) => self.draw_underlines(
1101 &scene.underlines[range],
1102 &mut instance_offset,
1103 &mut pass,
1104 ),
1105 PrimitiveBatch::MonochromeSprites { texture_id, range } => self
1106 .draw_monochrome_sprites(
1107 &scene.monochrome_sprites[range],
1108 texture_id,
1109 &mut instance_offset,
1110 &mut pass,
1111 ),
1112 PrimitiveBatch::SubpixelSprites { texture_id, range } => self
1113 .draw_subpixel_sprites(
1114 &scene.subpixel_sprites[range],
1115 texture_id,
1116 &mut instance_offset,
1117 &mut pass,
1118 ),
1119 PrimitiveBatch::PolychromeSprites { texture_id, range } => self
1120 .draw_polychrome_sprites(
1121 &scene.polychrome_sprites[range],
1122 texture_id,
1123 &mut instance_offset,
1124 &mut pass,
1125 ),
1126 PrimitiveBatch::Surfaces(_surfaces) => {
1127 // Surfaces are macOS-only for video playback
1128 // Not implemented for Linux/wgpu
1129 true
1130 }
1131 };
1132 if !ok {
1133 overflow = true;
1134 break;
1135 }
1136 }
1137 }
1138
1139 if overflow {
1140 drop(encoder);
1141 if self.instance_buffer_capacity >= self.max_buffer_size {
1142 log::error!(
1143 "instance buffer size grew too large: {}",
1144 self.instance_buffer_capacity
1145 );
1146 frame.present();
1147 return;
1148 }
1149 self.grow_instance_buffer();
1150 continue;
1151 }
1152
1153 self.queue.submit(std::iter::once(encoder.finish()));
1154 frame.present();
1155 return;
1156 }
1157 }
1158
1159 fn draw_quads(
1160 &self,
1161 quads: &[Quad],
1162 instance_offset: &mut u64,
1163 pass: &mut wgpu::RenderPass<'_>,
1164 ) -> bool {
1165 let data = unsafe { Self::instance_bytes(quads) };
1166 self.draw_instances(
1167 data,
1168 quads.len() as u32,
1169 &self.pipelines.quads,
1170 instance_offset,
1171 pass,
1172 )
1173 }
1174
1175 fn draw_shadows(
1176 &self,
1177 shadows: &[Shadow],
1178 instance_offset: &mut u64,
1179 pass: &mut wgpu::RenderPass<'_>,
1180 ) -> bool {
1181 let data = unsafe { Self::instance_bytes(shadows) };
1182 self.draw_instances(
1183 data,
1184 shadows.len() as u32,
1185 &self.pipelines.shadows,
1186 instance_offset,
1187 pass,
1188 )
1189 }
1190
1191 fn draw_underlines(
1192 &self,
1193 underlines: &[Underline],
1194 instance_offset: &mut u64,
1195 pass: &mut wgpu::RenderPass<'_>,
1196 ) -> bool {
1197 let data = unsafe { Self::instance_bytes(underlines) };
1198 self.draw_instances(
1199 data,
1200 underlines.len() as u32,
1201 &self.pipelines.underlines,
1202 instance_offset,
1203 pass,
1204 )
1205 }
1206
1207 fn draw_monochrome_sprites(
1208 &self,
1209 sprites: &[MonochromeSprite],
1210 texture_id: AtlasTextureId,
1211 instance_offset: &mut u64,
1212 pass: &mut wgpu::RenderPass<'_>,
1213 ) -> bool {
1214 let tex_info = self.atlas.get_texture_info(texture_id);
1215 let data = unsafe { Self::instance_bytes(sprites) };
1216 self.draw_instances_with_texture(
1217 data,
1218 sprites.len() as u32,
1219 &tex_info.view,
1220 &self.pipelines.mono_sprites,
1221 instance_offset,
1222 pass,
1223 )
1224 }
1225
1226 fn draw_subpixel_sprites(
1227 &self,
1228 sprites: &[SubpixelSprite],
1229 texture_id: AtlasTextureId,
1230 instance_offset: &mut u64,
1231 pass: &mut wgpu::RenderPass<'_>,
1232 ) -> bool {
1233 let tex_info = self.atlas.get_texture_info(texture_id);
1234 let data = unsafe { Self::instance_bytes(sprites) };
1235 let pipeline = self
1236 .pipelines
1237 .subpixel_sprites
1238 .as_ref()
1239 .unwrap_or(&self.pipelines.mono_sprites);
1240 self.draw_instances_with_texture(
1241 data,
1242 sprites.len() as u32,
1243 &tex_info.view,
1244 pipeline,
1245 instance_offset,
1246 pass,
1247 )
1248 }
1249
1250 fn draw_polychrome_sprites(
1251 &self,
1252 sprites: &[PolychromeSprite],
1253 texture_id: AtlasTextureId,
1254 instance_offset: &mut u64,
1255 pass: &mut wgpu::RenderPass<'_>,
1256 ) -> bool {
1257 let tex_info = self.atlas.get_texture_info(texture_id);
1258 let data = unsafe { Self::instance_bytes(sprites) };
1259 self.draw_instances_with_texture(
1260 data,
1261 sprites.len() as u32,
1262 &tex_info.view,
1263 &self.pipelines.poly_sprites,
1264 instance_offset,
1265 pass,
1266 )
1267 }
1268
1269 fn draw_instances(
1270 &self,
1271 data: &[u8],
1272 instance_count: u32,
1273 pipeline: &wgpu::RenderPipeline,
1274 instance_offset: &mut u64,
1275 pass: &mut wgpu::RenderPass<'_>,
1276 ) -> bool {
1277 if instance_count == 0 {
1278 return true;
1279 }
1280 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1281 return false;
1282 };
1283 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1284 label: None,
1285 layout: &self.bind_group_layouts.instances,
1286 entries: &[wgpu::BindGroupEntry {
1287 binding: 0,
1288 resource: self.instance_binding(offset, size),
1289 }],
1290 });
1291 pass.set_pipeline(pipeline);
1292 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1293 pass.set_bind_group(1, &bind_group, &[]);
1294 pass.draw(0..4, 0..instance_count);
1295 true
1296 }
1297
1298 fn draw_instances_with_texture(
1299 &self,
1300 data: &[u8],
1301 instance_count: u32,
1302 texture_view: &wgpu::TextureView,
1303 pipeline: &wgpu::RenderPipeline,
1304 instance_offset: &mut u64,
1305 pass: &mut wgpu::RenderPass<'_>,
1306 ) -> bool {
1307 if instance_count == 0 {
1308 return true;
1309 }
1310 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1311 return false;
1312 };
1313 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1314 label: None,
1315 layout: &self.bind_group_layouts.instances_with_texture,
1316 entries: &[
1317 wgpu::BindGroupEntry {
1318 binding: 0,
1319 resource: self.instance_binding(offset, size),
1320 },
1321 wgpu::BindGroupEntry {
1322 binding: 1,
1323 resource: wgpu::BindingResource::TextureView(texture_view),
1324 },
1325 wgpu::BindGroupEntry {
1326 binding: 2,
1327 resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1328 },
1329 ],
1330 });
1331 pass.set_pipeline(pipeline);
1332 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1333 pass.set_bind_group(1, &bind_group, &[]);
1334 pass.draw(0..4, 0..instance_count);
1335 true
1336 }
1337
1338 unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
1339 unsafe {
1340 std::slice::from_raw_parts(
1341 instances.as_ptr() as *const u8,
1342 std::mem::size_of_val(instances),
1343 )
1344 }
1345 }
1346
1347 fn draw_paths_from_intermediate(
1348 &self,
1349 paths: &[Path<ScaledPixels>],
1350 instance_offset: &mut u64,
1351 pass: &mut wgpu::RenderPass<'_>,
1352 ) -> bool {
1353 let first_path = &paths[0];
1354 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1355 {
1356 paths
1357 .iter()
1358 .map(|p| PathSprite {
1359 bounds: p.clipped_bounds(),
1360 })
1361 .collect()
1362 } else {
1363 let mut bounds = first_path.clipped_bounds();
1364 for path in paths.iter().skip(1) {
1365 bounds = bounds.union(&path.clipped_bounds());
1366 }
1367 vec![PathSprite { bounds }]
1368 };
1369
1370 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1371 return true;
1372 };
1373
1374 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1375 self.draw_instances_with_texture(
1376 sprite_data,
1377 sprites.len() as u32,
1378 path_intermediate_view,
1379 &self.pipelines.paths,
1380 instance_offset,
1381 pass,
1382 )
1383 }
1384
1385 fn draw_paths_to_intermediate(
1386 &self,
1387 encoder: &mut wgpu::CommandEncoder,
1388 paths: &[Path<ScaledPixels>],
1389 instance_offset: &mut u64,
1390 ) -> bool {
1391 let mut vertices = Vec::new();
1392 for path in paths {
1393 let bounds = path.clipped_bounds();
1394 vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
1395 xy_position: v.xy_position,
1396 st_position: v.st_position,
1397 color: path.color,
1398 bounds,
1399 }));
1400 }
1401
1402 if vertices.is_empty() {
1403 return true;
1404 }
1405
1406 let vertex_data = unsafe { Self::instance_bytes(&vertices) };
1407 let Some((vertex_offset, vertex_size)) =
1408 self.write_to_instance_buffer(instance_offset, vertex_data)
1409 else {
1410 return false;
1411 };
1412
1413 let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1414 label: Some("path_rasterization_bind_group"),
1415 layout: &self.bind_group_layouts.instances,
1416 entries: &[wgpu::BindGroupEntry {
1417 binding: 0,
1418 resource: self.instance_binding(vertex_offset, vertex_size),
1419 }],
1420 });
1421
1422 let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
1423 return true;
1424 };
1425
1426 let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
1427 (msaa_view, Some(path_intermediate_view))
1428 } else {
1429 (path_intermediate_view, None)
1430 };
1431
1432 {
1433 let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
1434 label: Some("path_rasterization_pass"),
1435 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
1436 view: target_view,
1437 resolve_target,
1438 ops: wgpu::Operations {
1439 load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
1440 store: wgpu::StoreOp::Store,
1441 },
1442 depth_slice: None,
1443 })],
1444 depth_stencil_attachment: None,
1445 ..Default::default()
1446 });
1447
1448 pass.set_pipeline(&self.pipelines.path_rasterization);
1449 pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
1450 pass.set_bind_group(1, &data_bind_group, &[]);
1451 pass.draw(0..vertices.len() as u32, 0..1);
1452 }
1453
1454 true
1455 }
1456
1457 fn grow_instance_buffer(&mut self) {
1458 let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
1459 log::info!("increased instance buffer size to {}", new_capacity);
1460 self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1461 label: Some("instance_buffer"),
1462 size: new_capacity,
1463 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1464 mapped_at_creation: false,
1465 });
1466 self.instance_buffer_capacity = new_capacity;
1467 }
1468
1469 fn write_to_instance_buffer(
1470 &self,
1471 instance_offset: &mut u64,
1472 data: &[u8],
1473 ) -> Option<(u64, NonZeroU64)> {
1474 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1475 let size = (data.len() as u64).max(16);
1476 if offset + size > self.instance_buffer_capacity {
1477 return None;
1478 }
1479 self.queue.write_buffer(&self.instance_buffer, offset, data);
1480 *instance_offset = offset + size;
1481 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1482 }
1483
1484 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1485 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1486 buffer: &self.instance_buffer,
1487 offset,
1488 size: Some(size),
1489 })
1490 }
1491
    /// Releases renderer resources. Intentionally a no-op here: every wgpu
    /// object held by this renderer is reference-counted and is freed when
    /// this struct is dropped.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1495}
1496
/// Rendering tunables resolved once from the adapter's capabilities and
/// `ZED_FONTS_*` environment-variable overrides.
struct RenderingParameters {
    // MSAA sample count for path rasterization: the highest of 4/2/1 that the
    // surface format supports.
    path_sample_count: u32,
    // Gamma-correction coefficients derived from the (clamped) gamma value.
    gamma_ratios: [f32; 4],
    // Extra contrast for grayscale-antialiased text; clamped to >= 0.
    grayscale_enhanced_contrast: f32,
    // Extra contrast for subpixel-antialiased text; clamped to >= 0.
    subpixel_enhanced_contrast: f32,
}
1503
1504impl RenderingParameters {
1505 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1506 use std::env;
1507
1508 let format_features = adapter.get_texture_format_features(surface_format);
1509 let path_sample_count = [4, 2, 1]
1510 .into_iter()
1511 .find(|&n| format_features.flags.sample_count_supported(n))
1512 .unwrap_or(1);
1513
1514 let gamma = env::var("ZED_FONTS_GAMMA")
1515 .ok()
1516 .and_then(|v| v.parse().ok())
1517 .unwrap_or(1.8_f32)
1518 .clamp(1.0, 2.2);
1519 let gamma_ratios = get_gamma_correction_ratios(gamma);
1520
1521 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1522 .ok()
1523 .and_then(|v| v.parse().ok())
1524 .unwrap_or(1.0_f32)
1525 .max(0.0);
1526
1527 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1528 .ok()
1529 .and_then(|v| v.parse().ok())
1530 .unwrap_or(0.5_f32)
1531 .max(0.0);
1532
1533 Self {
1534 path_sample_count,
1535 gamma_ratios,
1536 grayscale_enhanced_contrast,
1537 subpixel_enhanced_contrast,
1538 }
1539 }
1540}