1use crate::{WgpuAtlas, WgpuContext};
2use bytemuck::{Pod, Zeroable};
3use gpui::{
4 AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
5 PolychromeSprite, PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, SubpixelSprite,
6 Underline, get_gamma_correction_ratios,
7};
8use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
9use std::num::NonZeroU64;
10use std::sync::Arc;
11
/// Per-frame global uniforms, byte-copied into `globals_buffer` each frame.
///
/// `#[repr(C)]` + `Pod`/`Zeroable` allow uploading via `bytemuck::bytes_of`.
/// NOTE(review): field order and padding are assumed to mirror the matching
/// uniform block in `shaders.wgsl` — confirm against the WGSL source.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    // Surface size in pixels; written from `surface_config` in `draw`.
    viewport_size: [f32; 2],
    // 1 when the surface composites with premultiplied alpha, else 0.
    premultiplied_alpha: u32,
    // Explicit padding so the struct is 16 bytes for uniform-buffer layout.
    pad: u32,
}
19
/// A rectangle flattened to plain `f32`s so it can live inside `Pod` uniform
/// structs (see [`SurfaceParams`]); converted from `Bounds<ScaledPixels>`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    // Top-left corner, in scaled pixels.
    origin: [f32; 2],
    // Width and height, in scaled pixels.
    size: [f32; 2],
}
26
27impl From<Bounds<ScaledPixels>> for PodBounds {
28 fn from(bounds: Bounds<ScaledPixels>) -> Self {
29 Self {
30 origin: [bounds.origin.x.0, bounds.origin.y.0],
31 size: [bounds.size.width.0, bounds.size.height.0],
32 }
33 }
34}
35
/// Uniform data for the `surfaces` pipeline (bound via the `surfaces` bind
/// group layout). The pipeline itself is currently unused on this backend —
/// see the `#[allow(dead_code)]` on `WgpuPipelines::surfaces`.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    // Destination rectangle of the surface, in scaled pixels.
    bounds: PodBounds,
    // Clipping rectangle applied when compositing the surface.
    content_mask: PodBounds,
}
42
/// Text gamma-correction uniforms, uploaded once per frame at `gamma_offset`
/// inside `globals_buffer` and bound at binding 1 of the globals group.
/// Values come from `RenderingParameters` (see `draw`).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GammaParams {
    // Gamma correction ratios; presumably produced by
    // `get_gamma_correction_ratios` — confirm in `RenderingParameters::new`.
    gamma_ratios: [f32; 4],
    // Contrast enhancement applied to grayscale-antialiased text.
    grayscale_enhanced_contrast: f32,
    // Contrast enhancement applied to subpixel-antialiased text.
    subpixel_enhanced_contrast: f32,
    // Padding to a 32-byte size for uniform-buffer layout.
    _pad: [f32; 2],
}
51
/// Per-instance data for compositing rasterized paths from the intermediate
/// texture onto the frame. NOTE(review): not referenced in this chunk —
/// presumably built in `draw_paths_from_intermediate`; confirm there.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathSprite {
    // Screen-space rectangle the rasterized path is sampled into.
    bounds: Bounds<ScaledPixels>,
}
57
/// Vertex data consumed by the `path_rasterization` pipeline, which renders
/// path geometry into the intermediate texture. NOTE(review): not referenced
/// in this chunk — presumably built in `draw_paths_to_intermediate`; confirm.
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    // Vertex position in scaled screen pixels.
    xy_position: Point<ScaledPixels>,
    // Parametric coordinates used for curve coverage evaluation.
    st_position: Point<f32>,
    // Fill for the path this vertex belongs to.
    color: Background,
    // Bounding box of the whole path.
    bounds: Bounds<ScaledPixels>,
}
66
/// Caller-supplied parameters for creating a [`WgpuRenderer`]'s window surface.
pub struct WgpuSurfaceConfig {
    // Initial drawable size in device pixels.
    pub size: Size<DevicePixels>,
    // Whether the window requests a transparent (premultiplied) compositing mode.
    pub transparent: bool,
}
71
/// One render pipeline per primitive kind drawn by [`WgpuRenderer::draw`].
/// All are (re)built together in `create_pipelines`, e.g. when the surface's
/// alpha mode changes (see `update_transparency`).
struct WgpuPipelines {
    quads: wgpu::RenderPipeline,
    shadows: wgpu::RenderPipeline,
    // Renders path geometry into the intermediate (possibly MSAA) texture.
    path_rasterization: wgpu::RenderPipeline,
    // Composites the intermediate texture back onto the frame.
    paths: wgpu::RenderPipeline,
    underlines: wgpu::RenderPipeline,
    mono_sprites: wgpu::RenderPipeline,
    // Only built when the adapter supports dual-source blending; callers fall
    // back to `mono_sprites` otherwise (see `draw_subpixel_sprites`).
    subpixel_sprites: Option<wgpu::RenderPipeline>,
    poly_sprites: wgpu::RenderPipeline,
    // Built but unused on this backend; surfaces are a macOS-only feature.
    #[allow(dead_code)]
    surfaces: wgpu::RenderPipeline,
}
84
/// Shared bind group layouts; created once in `create_bind_group_layouts` and
/// reused both for pipeline layouts and for per-draw bind group creation.
struct WgpuBindGroupLayouts {
    // group(0): GlobalParams uniform + GammaParams uniform.
    globals: wgpu::BindGroupLayout,
    // group(1): read-only storage buffer of instance data.
    instances: wgpu::BindGroupLayout,
    // group(1): instance storage buffer + sampled texture + sampler.
    instances_with_texture: wgpu::BindGroupLayout,
    // group(1) for the (unused) surfaces pipeline: uniform + 2 textures + sampler.
    surfaces: wgpu::BindGroupLayout,
}
91
/// wgpu-backed renderer for a single window: owns the surface, all pipelines,
/// the shared uniform/instance buffers, and the intermediate targets used for
/// path rasterization.
pub struct WgpuRenderer {
    device: Arc<wgpu::Device>,
    queue: Arc<wgpu::Queue>,
    surface: wgpu::Surface<'static>,
    surface_config: wgpu::SurfaceConfiguration,
    pipelines: WgpuPipelines,
    bind_group_layouts: WgpuBindGroupLayouts,
    // Texture atlas for glyph/image sprites; shared with the platform layer.
    atlas: Arc<WgpuAtlas>,
    atlas_sampler: wgpu::Sampler,
    // Single buffer holding GlobalParams, a path-pass copy of GlobalParams,
    // and GammaParams, at the aligned offsets below.
    globals_buffer: wgpu::Buffer,
    path_globals_offset: u64,
    gamma_offset: u64,
    globals_bind_group: wgpu::BindGroup,
    path_globals_bind_group: wgpu::BindGroup,
    // Growable storage buffer all per-frame instance data is packed into.
    instance_buffer: wgpu::Buffer,
    instance_buffer_capacity: u64,
    storage_buffer_alignment: u64,
    // Offscreen target paths are rasterized into before compositing.
    path_intermediate_texture: wgpu::Texture,
    path_intermediate_view: wgpu::TextureView,
    // MSAA companion of the intermediate target; None when sample count is 1.
    path_msaa_texture: Option<wgpu::Texture>,
    path_msaa_view: Option<wgpu::TextureView>,
    rendering_params: RenderingParameters,
    dual_source_blending: bool,
    adapter_info: wgpu::AdapterInfo,
    // Alpha modes chosen at startup for transparent vs. opaque windows, so
    // `update_transparency` can switch without re-querying capabilities.
    transparent_alpha_mode: wgpu::CompositeAlphaMode,
    opaque_alpha_mode: wgpu::CompositeAlphaMode,
}
119
120impl WgpuRenderer {
121 /// Creates a new WgpuRenderer from raw window handles.
122 ///
123 /// # Safety
124 /// The caller must ensure that the window handle remains valid for the lifetime
125 /// of the returned renderer.
126 pub fn new<W: HasWindowHandle + HasDisplayHandle>(
127 gpu_context: &mut Option<WgpuContext>,
128 window: &W,
129 config: WgpuSurfaceConfig,
130 ) -> anyhow::Result<Self> {
131 let window_handle = window
132 .window_handle()
133 .map_err(|e| anyhow::anyhow!("Failed to get window handle: {e}"))?;
134 let display_handle = window
135 .display_handle()
136 .map_err(|e| anyhow::anyhow!("Failed to get display handle: {e}"))?;
137
138 let target = wgpu::SurfaceTargetUnsafe::RawHandle {
139 raw_display_handle: display_handle.as_raw(),
140 raw_window_handle: window_handle.as_raw(),
141 };
142
143 // Use the existing context's instance if available, otherwise create a new one.
144 // The surface must be created with the same instance that will be used for
145 // adapter selection, otherwise wgpu will panic.
146 let instance = gpu_context
147 .as_ref()
148 .map(|ctx| ctx.instance.clone())
149 .unwrap_or_else(WgpuContext::instance);
150
151 // Safety: The caller guarantees that the window handle is valid for the
152 // lifetime of this renderer. In practice, the RawWindow struct is created
153 // from the native window handles and the surface is dropped before the window.
154 let surface = unsafe {
155 instance
156 .create_surface_unsafe(target)
157 .map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
158 };
159
160 let context = match gpu_context {
161 Some(context) => {
162 context.check_compatible_with_surface(&surface)?;
163 context
164 }
165 None => gpu_context.insert(WgpuContext::new(instance, &surface)?),
166 };
167
168 let surface_caps = surface.get_capabilities(&context.adapter);
169 let preferred_formats = [
170 wgpu::TextureFormat::Bgra8Unorm,
171 wgpu::TextureFormat::Rgba8Unorm,
172 ];
173 let surface_format = preferred_formats
174 .iter()
175 .find(|f| surface_caps.formats.contains(f))
176 .copied()
177 .or_else(|| surface_caps.formats.iter().find(|f| !f.is_srgb()).copied())
178 .or_else(|| surface_caps.formats.first().copied())
179 .ok_or_else(|| {
180 anyhow::anyhow!(
181 "Surface reports no supported texture formats for adapter {:?}",
182 context.adapter.get_info().name
183 )
184 })?;
185
186 let pick_alpha_mode =
187 |preferences: &[wgpu::CompositeAlphaMode]| -> anyhow::Result<wgpu::CompositeAlphaMode> {
188 preferences
189 .iter()
190 .find(|p| surface_caps.alpha_modes.contains(p))
191 .copied()
192 .or_else(|| surface_caps.alpha_modes.first().copied())
193 .ok_or_else(|| {
194 anyhow::anyhow!(
195 "Surface reports no supported alpha modes for adapter {:?}",
196 context.adapter.get_info().name
197 )
198 })
199 };
200
201 let transparent_alpha_mode = pick_alpha_mode(&[
202 wgpu::CompositeAlphaMode::PreMultiplied,
203 wgpu::CompositeAlphaMode::Inherit,
204 ])?;
205
206 let opaque_alpha_mode = pick_alpha_mode(&[
207 wgpu::CompositeAlphaMode::Opaque,
208 wgpu::CompositeAlphaMode::Inherit,
209 ])?;
210
211 let alpha_mode = if config.transparent {
212 transparent_alpha_mode
213 } else {
214 opaque_alpha_mode
215 };
216
217 let surface_config = wgpu::SurfaceConfiguration {
218 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
219 format: surface_format,
220 width: config.size.width.0 as u32,
221 height: config.size.height.0 as u32,
222 present_mode: wgpu::PresentMode::Fifo,
223 desired_maximum_frame_latency: 2,
224 alpha_mode,
225 view_formats: vec![],
226 };
227 surface.configure(&context.device, &surface_config);
228
229 let device = Arc::clone(&context.device);
230 let queue = Arc::clone(&context.queue);
231 let dual_source_blending = context.supports_dual_source_blending();
232
233 let rendering_params = RenderingParameters::new(&context.adapter, surface_format);
234 let bind_group_layouts = Self::create_bind_group_layouts(&device);
235 let pipelines = Self::create_pipelines(
236 &device,
237 &bind_group_layouts,
238 surface_format,
239 alpha_mode,
240 rendering_params.path_sample_count,
241 dual_source_blending,
242 );
243
244 let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
245 let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
246 label: Some("atlas_sampler"),
247 mag_filter: wgpu::FilterMode::Linear,
248 min_filter: wgpu::FilterMode::Linear,
249 ..Default::default()
250 });
251
252 let uniform_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
253 let globals_size = std::mem::size_of::<GlobalParams>() as u64;
254 let gamma_size = std::mem::size_of::<GammaParams>() as u64;
255 let path_globals_offset = globals_size.next_multiple_of(uniform_alignment);
256 let gamma_offset = (path_globals_offset + globals_size).next_multiple_of(uniform_alignment);
257
258 let globals_buffer = device.create_buffer(&wgpu::BufferDescriptor {
259 label: Some("globals_buffer"),
260 size: gamma_offset + gamma_size,
261 usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
262 mapped_at_creation: false,
263 });
264
265 let storage_buffer_alignment = device.limits().min_storage_buffer_offset_alignment as u64;
266 let initial_instance_buffer_capacity = 2 * 1024 * 1024;
267 let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor {
268 label: Some("instance_buffer"),
269 size: initial_instance_buffer_capacity,
270 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
271 mapped_at_creation: false,
272 });
273
274 let (path_intermediate_texture, path_intermediate_view) = Self::create_path_intermediate(
275 &device,
276 surface_format,
277 config.size.width.0 as u32,
278 config.size.height.0 as u32,
279 );
280
281 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
282 &device,
283 surface_format,
284 config.size.width.0 as u32,
285 config.size.height.0 as u32,
286 rendering_params.path_sample_count,
287 )
288 .map(|(t, v)| (Some(t), Some(v)))
289 .unwrap_or((None, None));
290
291 let globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
292 label: Some("globals_bind_group"),
293 layout: &bind_group_layouts.globals,
294 entries: &[
295 wgpu::BindGroupEntry {
296 binding: 0,
297 resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
298 buffer: &globals_buffer,
299 offset: 0,
300 size: Some(NonZeroU64::new(globals_size).unwrap()),
301 }),
302 },
303 wgpu::BindGroupEntry {
304 binding: 1,
305 resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
306 buffer: &globals_buffer,
307 offset: gamma_offset,
308 size: Some(NonZeroU64::new(gamma_size).unwrap()),
309 }),
310 },
311 ],
312 });
313
314 let path_globals_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
315 label: Some("path_globals_bind_group"),
316 layout: &bind_group_layouts.globals,
317 entries: &[
318 wgpu::BindGroupEntry {
319 binding: 0,
320 resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
321 buffer: &globals_buffer,
322 offset: path_globals_offset,
323 size: Some(NonZeroU64::new(globals_size).unwrap()),
324 }),
325 },
326 wgpu::BindGroupEntry {
327 binding: 1,
328 resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
329 buffer: &globals_buffer,
330 offset: gamma_offset,
331 size: Some(NonZeroU64::new(gamma_size).unwrap()),
332 }),
333 },
334 ],
335 });
336
337 let adapter_info = context.adapter.get_info();
338
339 Ok(Self {
340 device,
341 queue,
342 surface,
343 surface_config,
344 pipelines,
345 bind_group_layouts,
346 atlas,
347 atlas_sampler,
348 globals_buffer,
349 path_globals_offset,
350 gamma_offset,
351 globals_bind_group,
352 path_globals_bind_group,
353 instance_buffer,
354 instance_buffer_capacity: initial_instance_buffer_capacity,
355 storage_buffer_alignment,
356 path_intermediate_texture,
357 path_intermediate_view,
358 path_msaa_texture,
359 path_msaa_view,
360 rendering_params,
361 dual_source_blending,
362 adapter_info,
363 transparent_alpha_mode,
364 opaque_alpha_mode,
365 })
366 }
367
    /// Builds the four bind group layouts shared by all pipelines.
    /// See [`WgpuBindGroupLayouts`] for what each group binds.
    fn create_bind_group_layouts(device: &wgpu::Device) -> WgpuBindGroupLayouts {
        // group(0): binding 0 = GlobalParams (vertex+fragment),
        //           binding 1 = GammaParams (fragment only).
        let globals =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("globals_layout"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GlobalParams>() as u64
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: NonZeroU64::new(
                                std::mem::size_of::<GammaParams>() as u64
                            ),
                        },
                        count: None,
                    },
                ],
            });

        // Read-only storage buffer entry shared by the instance layouts below.
        let storage_buffer_entry = |binding: u32| wgpu::BindGroupLayoutEntry {
            binding,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Storage { read_only: true },
                has_dynamic_offset: false,
                min_binding_size: None,
            },
            count: None,
        };

        // group(1) for untextured primitives: just the instance data.
        let instances = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("instances_layout"),
            entries: &[storage_buffer_entry(0)],
        });

        // group(1) for sprite/path-composite pipelines: instance data plus a
        // sampled texture and its sampler.
        let instances_with_texture =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("instances_with_texture_layout"),
                entries: &[
                    storage_buffer_entry(0),
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });

        // group(1) for the surfaces pipeline: SurfaceParams uniform, two planes
        // of video data (presumably Y and CbCr — confirm in shaders.wgsl), and
        // a sampler. Unused on this backend.
        let surfaces = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("surfaces_layout"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: NonZeroU64::new(
                            std::mem::size_of::<SurfaceParams>() as u64
                        ),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });

        WgpuBindGroupLayouts {
            globals,
            instances,
            instances_with_texture,
            surfaces,
        }
    }
491
    /// Compiles the WGSL shader module and builds every render pipeline.
    ///
    /// Called at startup and again from `update_transparency`, because the
    /// surface-targeting pipelines bake in a blend state derived from the
    /// current composite alpha mode.
    fn create_pipelines(
        device: &wgpu::Device,
        layouts: &WgpuBindGroupLayouts,
        surface_format: wgpu::TextureFormat,
        alpha_mode: wgpu::CompositeAlphaMode,
        path_sample_count: u32,
        dual_source_blending: bool,
    ) -> WgpuPipelines {
        let shader_source = include_str!("shaders.wgsl");
        let shader_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("gpui_shaders"),
            source: wgpu::ShaderSource::Wgsl(shader_source.into()),
        });

        // Default blend for pipelines that render straight to the surface:
        // match the compositor's expectation about premultiplied alpha.
        let blend_mode = match alpha_mode {
            wgpu::CompositeAlphaMode::PreMultiplied => {
                wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING
            }
            _ => wgpu::BlendState::ALPHA_BLENDING,
        };

        let color_target = wgpu::ColorTargetState {
            format: surface_format,
            blend: Some(blend_mode),
            write_mask: wgpu::ColorWrites::ALL,
        };

        // All pipelines share the same shape: no vertex buffers (instance data
        // comes from a storage buffer), two bind groups (globals + data), no
        // depth/stencil. Only entry points, topology, targets and sample count
        // vary.
        let create_pipeline = |name: &str,
                               vs_entry: &str,
                               fs_entry: &str,
                               globals_layout: &wgpu::BindGroupLayout,
                               data_layout: &wgpu::BindGroupLayout,
                               topology: wgpu::PrimitiveTopology,
                               color_targets: &[Option<wgpu::ColorTargetState>],
                               sample_count: u32| {
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some(&format!("{name}_layout")),
                bind_group_layouts: &[globals_layout, data_layout],
                immediate_size: 0,
            });

            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some(name),
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader_module,
                    entry_point: Some(vs_entry),
                    buffers: &[],
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_module,
                    entry_point: Some(fs_entry),
                    targets: color_targets,
                    compilation_options: wgpu::PipelineCompilationOptions::default(),
                }),
                primitive: wgpu::PrimitiveState {
                    topology,
                    strip_index_format: None,
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: None,
                    polygon_mode: wgpu::PolygonMode::Fill,
                    unclipped_depth: false,
                    conservative: false,
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState {
                    count: sample_count,
                    mask: !0,
                    alpha_to_coverage_enabled: false,
                },
                multiview_mask: None,
                cache: None,
            })
        };

        let quads = create_pipeline(
            "quads",
            "vs_quad",
            "fs_quad",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let shadows = create_pipeline(
            "shadows",
            "vs_shadow",
            "fs_shadow",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Renders path geometry into the intermediate texture (not the surface),
        // so it always uses premultiplied blending and the MSAA sample count.
        let path_rasterization = create_pipeline(
            "path_rasterization",
            "vs_path_rasterization",
            "fs_path_rasterization",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleList,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            path_sample_count,
        );

        // Composite blend for copying the intermediate texture onto the frame:
        // source color is taken as-is (One / OneMinusSrcAlpha on color).
        let paths_blend = wgpu::BlendState {
            color: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            alpha: wgpu::BlendComponent {
                src_factor: wgpu::BlendFactor::One,
                dst_factor: wgpu::BlendFactor::One,
                operation: wgpu::BlendOperation::Add,
            },
        };

        let paths = create_pipeline(
            "paths",
            "vs_path",
            "fs_path",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(wgpu::ColorTargetState {
                format: surface_format,
                blend: Some(paths_blend),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            1,
        );

        let underlines = create_pipeline(
            "underlines",
            "vs_underline",
            "fs_underline",
            &layouts.globals,
            &layouts.instances,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        let mono_sprites = create_pipeline(
            "mono_sprites",
            "vs_mono_sprite",
            "fs_mono_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Subpixel text needs per-channel coverage, which requires dual-source
        // blending (Src1 factors). Without hardware support the pipeline is
        // skipped and draw_subpixel_sprites falls back to mono_sprites.
        let subpixel_sprites = if dual_source_blending {
            let subpixel_blend = wgpu::BlendState {
                color: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::Src1,
                    dst_factor: wgpu::BlendFactor::OneMinusSrc1,
                    operation: wgpu::BlendOperation::Add,
                },
                alpha: wgpu::BlendComponent {
                    src_factor: wgpu::BlendFactor::One,
                    dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                    operation: wgpu::BlendOperation::Add,
                },
            };

            Some(create_pipeline(
                "subpixel_sprites",
                "vs_subpixel_sprite",
                "fs_subpixel_sprite",
                &layouts.globals,
                &layouts.instances_with_texture,
                wgpu::PrimitiveTopology::TriangleStrip,
                &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    blend: Some(subpixel_blend),
                    // Color channels only; the destination alpha is left alone.
                    write_mask: wgpu::ColorWrites::COLOR,
                })],
                1,
            ))
        } else {
            None
        };

        let poly_sprites = create_pipeline(
            "poly_sprites",
            "vs_poly_sprite",
            "fs_poly_sprite",
            &layouts.globals,
            &layouts.instances_with_texture,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target.clone())],
            1,
        );

        // Built for parity but unused here; see WgpuPipelines::surfaces.
        let surfaces = create_pipeline(
            "surfaces",
            "vs_surface",
            "fs_surface",
            &layouts.globals,
            &layouts.surfaces,
            wgpu::PrimitiveTopology::TriangleStrip,
            &[Some(color_target)],
            1,
        );

        WgpuPipelines {
            quads,
            shadows,
            path_rasterization,
            paths,
            underlines,
            mono_sprites,
            subpixel_sprites,
            poly_sprites,
            surfaces,
        }
    }
721
722 fn create_path_intermediate(
723 device: &wgpu::Device,
724 format: wgpu::TextureFormat,
725 width: u32,
726 height: u32,
727 ) -> (wgpu::Texture, wgpu::TextureView) {
728 let texture = device.create_texture(&wgpu::TextureDescriptor {
729 label: Some("path_intermediate"),
730 size: wgpu::Extent3d {
731 width: width.max(1),
732 height: height.max(1),
733 depth_or_array_layers: 1,
734 },
735 mip_level_count: 1,
736 sample_count: 1,
737 dimension: wgpu::TextureDimension::D2,
738 format,
739 usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
740 view_formats: &[],
741 });
742 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
743 (texture, view)
744 }
745
746 fn create_msaa_if_needed(
747 device: &wgpu::Device,
748 format: wgpu::TextureFormat,
749 width: u32,
750 height: u32,
751 sample_count: u32,
752 ) -> Option<(wgpu::Texture, wgpu::TextureView)> {
753 if sample_count <= 1 {
754 return None;
755 }
756 let texture = device.create_texture(&wgpu::TextureDescriptor {
757 label: Some("path_msaa"),
758 size: wgpu::Extent3d {
759 width: width.max(1),
760 height: height.max(1),
761 depth_or_array_layers: 1,
762 },
763 mip_level_count: 1,
764 sample_count,
765 dimension: wgpu::TextureDimension::D2,
766 format,
767 usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
768 view_formats: &[],
769 });
770 let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
771 Some((texture, view))
772 }
773
774 pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
775 let width = size.width.0 as u32;
776 let height = size.height.0 as u32;
777
778 if width != self.surface_config.width || height != self.surface_config.height {
779 self.surface_config.width = width.max(1);
780 self.surface_config.height = height.max(1);
781 self.surface.configure(&self.device, &self.surface_config);
782
783 let (path_intermediate_texture, path_intermediate_view) =
784 Self::create_path_intermediate(
785 &self.device,
786 self.surface_config.format,
787 self.surface_config.width,
788 self.surface_config.height,
789 );
790 self.path_intermediate_texture = path_intermediate_texture;
791 self.path_intermediate_view = path_intermediate_view;
792
793 let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
794 &self.device,
795 self.surface_config.format,
796 self.surface_config.width,
797 self.surface_config.height,
798 self.rendering_params.path_sample_count,
799 )
800 .map(|(t, v)| (Some(t), Some(v)))
801 .unwrap_or((None, None));
802 self.path_msaa_texture = path_msaa_texture;
803 self.path_msaa_view = path_msaa_view;
804 }
805 }
806
807 pub fn update_transparency(&mut self, transparent: bool) {
808 let new_alpha_mode = if transparent {
809 self.transparent_alpha_mode
810 } else {
811 self.opaque_alpha_mode
812 };
813
814 if new_alpha_mode != self.surface_config.alpha_mode {
815 self.surface_config.alpha_mode = new_alpha_mode;
816 self.surface.configure(&self.device, &self.surface_config);
817 self.pipelines = Self::create_pipelines(
818 &self.device,
819 &self.bind_group_layouts,
820 self.surface_config.format,
821 self.surface_config.alpha_mode,
822 self.rendering_params.path_sample_count,
823 self.dual_source_blending,
824 );
825 }
826 }
827
828 #[allow(dead_code)]
829 pub fn viewport_size(&self) -> Size<DevicePixels> {
830 Size {
831 width: DevicePixels(self.surface_config.width as i32),
832 height: DevicePixels(self.surface_config.height as i32),
833 }
834 }
835
    /// Shared handle to the sprite atlas whose textures the sprite pipelines
    /// sample from.
    pub fn sprite_atlas(&self) -> &Arc<WgpuAtlas> {
        &self.atlas
    }
839
840 pub fn gpu_specs(&self) -> GpuSpecs {
841 GpuSpecs {
842 is_software_emulated: self.adapter_info.device_type == wgpu::DeviceType::Cpu,
843 device_name: self.adapter_info.name.clone(),
844 driver_name: self.adapter_info.driver.clone(),
845 driver_info: self.adapter_info.driver_info.clone(),
846 }
847 }
848
    /// Renders one frame of `scene` to the window surface.
    ///
    /// Records one render pass over the scene's primitive batches; path batches
    /// temporarily end the pass to rasterize into the intermediate texture and
    /// then resume it. If the shared instance buffer overflows, the whole frame
    /// is re-recorded with a larger buffer (up to a hard cap).
    pub fn draw(&mut self, scene: &Scene) {
        // Let the atlas flush any pending uploads before we record the frame.
        self.atlas.before_frame();

        let frame = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
                // Stale swapchain (e.g. after a resize): reconfigure and skip
                // this frame; the next draw acquires a fresh texture.
                self.surface.configure(&self.device, &self.surface_config);
                return;
            }
            Err(e) => {
                log::error!("Failed to acquire surface texture: {e}");
                return;
            }
        };
        let frame_view = frame
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let gamma_params = GammaParams {
            gamma_ratios: self.rendering_params.gamma_ratios,
            grayscale_enhanced_contrast: self.rendering_params.grayscale_enhanced_contrast,
            subpixel_enhanced_contrast: self.rendering_params.subpixel_enhanced_contrast,
            _pad: [0.0; 2],
        };

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.width as f32,
                self.surface_config.height as f32,
            ],
            premultiplied_alpha: if self.surface_config.alpha_mode
                == wgpu::CompositeAlphaMode::PreMultiplied
            {
                1
            } else {
                0
            },
            pad: 0,
        };

        // The path rasterization pass gets its own copy of the globals with
        // premultiplied_alpha forced to 0, bound via path_globals_bind_group.
        let path_globals = GlobalParams {
            premultiplied_alpha: 0,
            ..globals
        };

        // Upload all three uniform blocks at their pre-computed aligned offsets.
        self.queue
            .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
        self.queue.write_buffer(
            &self.globals_buffer,
            self.path_globals_offset,
            bytemuck::bytes_of(&path_globals),
        );
        self.queue.write_buffer(
            &self.globals_buffer,
            self.gamma_offset,
            bytemuck::bytes_of(&gamma_params),
        );

        // Record-and-retry loop: on instance-buffer overflow the encoder is
        // discarded, the buffer grown, and the frame re-recorded from scratch.
        loop {
            // Running write cursor into the instance buffer for this attempt.
            let mut instance_offset: u64 = 0;
            let mut overflow = false;

            let mut encoder = self
                .device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("main_encoder"),
                });

            {
                let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                    label: Some("main_pass"),
                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                        view: &frame_view,
                        resolve_target: None,
                        ops: wgpu::Operations {
                            load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                            store: wgpu::StoreOp::Store,
                        },
                        depth_slice: None,
                    })],
                    depth_stencil_attachment: None,
                    ..Default::default()
                });

                // Each draw_* returns false when the instance buffer is full.
                for batch in scene.batches() {
                    let ok = match batch {
                        PrimitiveBatch::Quads(range) => {
                            self.draw_quads(&scene.quads[range], &mut instance_offset, &mut pass)
                        }
                        PrimitiveBatch::Shadows(range) => self.draw_shadows(
                            &scene.shadows[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::Paths(range) => {
                            let paths = &scene.paths[range];
                            if paths.is_empty() {
                                // Skip straight to the next batch.
                                continue;
                            }

                            // Paths render into the intermediate texture, so the
                            // main pass must end before the rasterization pass
                            // can begin on the same encoder.
                            drop(pass);

                            let did_draw = self.draw_paths_to_intermediate(
                                &mut encoder,
                                paths,
                                &mut instance_offset,
                            );

                            // Resume the main pass, preserving what was already
                            // drawn (LoadOp::Load instead of Clear).
                            pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                                label: Some("main_pass_continued"),
                                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                                    view: &frame_view,
                                    resolve_target: None,
                                    ops: wgpu::Operations {
                                        load: wgpu::LoadOp::Load,
                                        store: wgpu::StoreOp::Store,
                                    },
                                    depth_slice: None,
                                })],
                                depth_stencil_attachment: None,
                                ..Default::default()
                            });

                            if did_draw {
                                self.draw_paths_from_intermediate(
                                    paths,
                                    &mut instance_offset,
                                    &mut pass,
                                )
                            } else {
                                // Rasterization overflowed the instance buffer.
                                false
                            }
                        }
                        PrimitiveBatch::Underlines(range) => self.draw_underlines(
                            &scene.underlines[range],
                            &mut instance_offset,
                            &mut pass,
                        ),
                        PrimitiveBatch::MonochromeSprites { texture_id, range } => self
                            .draw_monochrome_sprites(
                                &scene.monochrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::SubpixelSprites { texture_id, range } => self
                            .draw_subpixel_sprites(
                                &scene.subpixel_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::PolychromeSprites { texture_id, range } => self
                            .draw_polychrome_sprites(
                                &scene.polychrome_sprites[range],
                                texture_id,
                                &mut instance_offset,
                                &mut pass,
                            ),
                        PrimitiveBatch::Surfaces(_surfaces) => {
                            // Surfaces are macOS-only for video playback
                            // Not implemented for Linux/wgpu
                            true
                        }
                    };
                    if !ok {
                        overflow = true;
                        break;
                    }
                }
            }

            if overflow {
                // Abandon this attempt's commands entirely.
                drop(encoder);
                if self.instance_buffer_capacity >= 256 * 1024 * 1024 {
                    // Hard cap (256 MiB): give up on the frame but still present
                    // the acquired texture so the swapchain keeps advancing.
                    log::error!(
                        "instance buffer size grew too large: {}",
                        self.instance_buffer_capacity
                    );
                    frame.present();
                    return;
                }
                self.grow_instance_buffer();
                continue;
            }

            self.queue.submit(std::iter::once(encoder.finish()));
            frame.present();
            return;
        }
    }
1040
1041 fn draw_quads(
1042 &self,
1043 quads: &[Quad],
1044 instance_offset: &mut u64,
1045 pass: &mut wgpu::RenderPass<'_>,
1046 ) -> bool {
1047 let data = unsafe { Self::instance_bytes(quads) };
1048 self.draw_instances(
1049 data,
1050 quads.len() as u32,
1051 &self.pipelines.quads,
1052 instance_offset,
1053 pass,
1054 )
1055 }
1056
1057 fn draw_shadows(
1058 &self,
1059 shadows: &[Shadow],
1060 instance_offset: &mut u64,
1061 pass: &mut wgpu::RenderPass<'_>,
1062 ) -> bool {
1063 let data = unsafe { Self::instance_bytes(shadows) };
1064 self.draw_instances(
1065 data,
1066 shadows.len() as u32,
1067 &self.pipelines.shadows,
1068 instance_offset,
1069 pass,
1070 )
1071 }
1072
1073 fn draw_underlines(
1074 &self,
1075 underlines: &[Underline],
1076 instance_offset: &mut u64,
1077 pass: &mut wgpu::RenderPass<'_>,
1078 ) -> bool {
1079 let data = unsafe { Self::instance_bytes(underlines) };
1080 self.draw_instances(
1081 data,
1082 underlines.len() as u32,
1083 &self.pipelines.underlines,
1084 instance_offset,
1085 pass,
1086 )
1087 }
1088
1089 fn draw_monochrome_sprites(
1090 &self,
1091 sprites: &[MonochromeSprite],
1092 texture_id: AtlasTextureId,
1093 instance_offset: &mut u64,
1094 pass: &mut wgpu::RenderPass<'_>,
1095 ) -> bool {
1096 let tex_info = self.atlas.get_texture_info(texture_id);
1097 let data = unsafe { Self::instance_bytes(sprites) };
1098 self.draw_instances_with_texture(
1099 data,
1100 sprites.len() as u32,
1101 &tex_info.view,
1102 &self.pipelines.mono_sprites,
1103 instance_offset,
1104 pass,
1105 )
1106 }
1107
    /// Draws a batch of subpixel-antialiased sprites sampled from the given
    /// atlas texture. Returns `false` on instance-buffer overflow.
    fn draw_subpixel_sprites(
        &self,
        sprites: &[SubpixelSprite],
        texture_id: AtlasTextureId,
        instance_offset: &mut u64,
        pass: &mut wgpu::RenderPass<'_>,
    ) -> bool {
        let tex_info = self.atlas.get_texture_info(texture_id);
        let data = unsafe { Self::instance_bytes(sprites) };
        // The dedicated pipeline only exists when the adapter supports
        // dual-source blending; otherwise fall back to the monochrome pipeline.
        // NOTE(review): the fallback assumes SubpixelSprite's instance layout is
        // readable by the mono-sprite shader — confirm in shaders.wgsl.
        let pipeline = self
            .pipelines
            .subpixel_sprites
            .as_ref()
            .unwrap_or(&self.pipelines.mono_sprites);
        self.draw_instances_with_texture(
            data,
            sprites.len() as u32,
            &tex_info.view,
            pipeline,
            instance_offset,
            pass,
        )
    }
1131
1132 fn draw_polychrome_sprites(
1133 &self,
1134 sprites: &[PolychromeSprite],
1135 texture_id: AtlasTextureId,
1136 instance_offset: &mut u64,
1137 pass: &mut wgpu::RenderPass<'_>,
1138 ) -> bool {
1139 let tex_info = self.atlas.get_texture_info(texture_id);
1140 let data = unsafe { Self::instance_bytes(sprites) };
1141 self.draw_instances_with_texture(
1142 data,
1143 sprites.len() as u32,
1144 &tex_info.view,
1145 &self.pipelines.poly_sprites,
1146 instance_offset,
1147 pass,
1148 )
1149 }
1150
1151 fn draw_instances(
1152 &self,
1153 data: &[u8],
1154 instance_count: u32,
1155 pipeline: &wgpu::RenderPipeline,
1156 instance_offset: &mut u64,
1157 pass: &mut wgpu::RenderPass<'_>,
1158 ) -> bool {
1159 if instance_count == 0 {
1160 return true;
1161 }
1162 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1163 return false;
1164 };
1165 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1166 label: None,
1167 layout: &self.bind_group_layouts.instances,
1168 entries: &[wgpu::BindGroupEntry {
1169 binding: 0,
1170 resource: self.instance_binding(offset, size),
1171 }],
1172 });
1173 pass.set_pipeline(pipeline);
1174 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1175 pass.set_bind_group(1, &bind_group, &[]);
1176 pass.draw(0..4, 0..instance_count);
1177 true
1178 }
1179
1180 fn draw_instances_with_texture(
1181 &self,
1182 data: &[u8],
1183 instance_count: u32,
1184 texture_view: &wgpu::TextureView,
1185 pipeline: &wgpu::RenderPipeline,
1186 instance_offset: &mut u64,
1187 pass: &mut wgpu::RenderPass<'_>,
1188 ) -> bool {
1189 if instance_count == 0 {
1190 return true;
1191 }
1192 let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
1193 return false;
1194 };
1195 let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
1196 label: None,
1197 layout: &self.bind_group_layouts.instances_with_texture,
1198 entries: &[
1199 wgpu::BindGroupEntry {
1200 binding: 0,
1201 resource: self.instance_binding(offset, size),
1202 },
1203 wgpu::BindGroupEntry {
1204 binding: 1,
1205 resource: wgpu::BindingResource::TextureView(texture_view),
1206 },
1207 wgpu::BindGroupEntry {
1208 binding: 2,
1209 resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
1210 },
1211 ],
1212 });
1213 pass.set_pipeline(pipeline);
1214 pass.set_bind_group(0, &self.globals_bind_group, &[]);
1215 pass.set_bind_group(1, &bind_group, &[]);
1216 pass.draw(0..4, 0..instance_count);
1217 true
1218 }
1219
    /// Reinterprets a slice of instance structs as a raw byte slice for upload
    /// into the GPU instance buffer.
    ///
    /// # Safety
    ///
    /// Every byte of the slice is read, so the caller must ensure `T` is
    /// plain-old-data with no uninitialized padding bytes (the `repr(C)`
    /// primitive types passed here are presumed to satisfy this — confirm when
    /// adding new instance types).
    unsafe fn instance_bytes<T>(instances: &[T]) -> &[u8] {
        unsafe {
            // size_of_val covers the entire slice: len * size_of::<T>().
            std::slice::from_raw_parts(
                instances.as_ptr() as *const u8,
                std::mem::size_of_val(instances),
            )
        }
    }
1228
1229 fn draw_paths_from_intermediate(
1230 &self,
1231 paths: &[Path<ScaledPixels>],
1232 instance_offset: &mut u64,
1233 pass: &mut wgpu::RenderPass<'_>,
1234 ) -> bool {
1235 let first_path = &paths[0];
1236 let sprites: Vec<PathSprite> = if paths.last().map(|p| &p.order) == Some(&first_path.order)
1237 {
1238 paths
1239 .iter()
1240 .map(|p| PathSprite {
1241 bounds: p.clipped_bounds(),
1242 })
1243 .collect()
1244 } else {
1245 let mut bounds = first_path.clipped_bounds();
1246 for path in paths.iter().skip(1) {
1247 bounds = bounds.union(&path.clipped_bounds());
1248 }
1249 vec![PathSprite { bounds }]
1250 };
1251
1252 let sprite_data = unsafe { Self::instance_bytes(&sprites) };
1253 self.draw_instances_with_texture(
1254 sprite_data,
1255 sprites.len() as u32,
1256 &self.path_intermediate_view,
1257 &self.pipelines.paths,
1258 instance_offset,
1259 pass,
1260 )
1261 }
1262
    /// Rasterizes path geometry into the intermediate path texture in its own
    /// render pass, resolving through the MSAA target when one exists.
    ///
    /// Returns `true` on success (including an empty batch) and `false` when
    /// the instance buffer lacks capacity for the vertex data.
    fn draw_paths_to_intermediate(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        paths: &[Path<ScaledPixels>],
        instance_offset: &mut u64,
    ) -> bool {
        // Flatten all paths into one vertex list; each vertex carries its
        // path's color and clipped bounds so a single draw can cover the batch.
        let mut vertices = Vec::new();
        for path in paths {
            let bounds = path.clipped_bounds();
            vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                xy_position: v.xy_position,
                st_position: v.st_position,
                color: path.color,
                bounds,
            }));
        }

        // Nothing to rasterize — succeed without opening a render pass.
        if vertices.is_empty() {
            return true;
        }

        // SAFETY: `PathRasterizationVertex` is repr(C) plain data — assumes it
        // contains no padding bytes; TODO confirm.
        let vertex_data = unsafe { Self::instance_bytes(&vertices) };
        let Some((vertex_offset, vertex_size)) =
            self.write_to_instance_buffer(instance_offset, vertex_data)
        else {
            return false;
        };

        let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("path_rasterization_bind_group"),
            layout: &self.bind_group_layouts.instances,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: self.instance_binding(vertex_offset, vertex_size),
            }],
        });

        // With MSAA, render into the multisampled target and resolve into the
        // intermediate texture; otherwise render into the intermediate directly.
        let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
            (msaa_view, Some(&self.path_intermediate_view))
        } else {
            (&self.path_intermediate_view, None)
        };

        // Scope the pass so it ends (and is recorded into `encoder`) before we
        // return.
        {
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("path_rasterization_pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target_view,
                    resolve_target,
                    ops: wgpu::Operations {
                        // Start from transparent so compositing only picks up
                        // covered pixels.
                        load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
                        store: wgpu::StoreOp::Store,
                    },
                    depth_slice: None,
                })],
                depth_stencil_attachment: None,
                ..Default::default()
            });

            pass.set_pipeline(&self.pipelines.path_rasterization);
            pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
            pass.set_bind_group(1, &data_bind_group, &[]);
            // One vertex per entry, single instance — plain triangle-list draw.
            pass.draw(0..vertices.len() as u32, 0..1);
        }

        true
    }
1330
1331 fn grow_instance_buffer(&mut self) {
1332 let new_capacity = self.instance_buffer_capacity * 2;
1333 log::info!("increased instance buffer size to {}", new_capacity);
1334 self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
1335 label: Some("instance_buffer"),
1336 size: new_capacity,
1337 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
1338 mapped_at_creation: false,
1339 });
1340 self.instance_buffer_capacity = new_capacity;
1341 }
1342
1343 fn write_to_instance_buffer(
1344 &self,
1345 instance_offset: &mut u64,
1346 data: &[u8],
1347 ) -> Option<(u64, NonZeroU64)> {
1348 let offset = (*instance_offset).next_multiple_of(self.storage_buffer_alignment);
1349 let size = (data.len() as u64).max(16);
1350 if offset + size > self.instance_buffer_capacity {
1351 return None;
1352 }
1353 self.queue.write_buffer(&self.instance_buffer, offset, data);
1354 *instance_offset = offset + size;
1355 Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
1356 }
1357
1358 fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
1359 wgpu::BindingResource::Buffer(wgpu::BufferBinding {
1360 buffer: &self.instance_buffer,
1361 offset,
1362 size: Some(size),
1363 })
1364 }
1365
    /// Tears down renderer GPU resources.
    ///
    /// Intentionally a no-op: wgpu resources are reference-counted and are
    /// released automatically when their handles are dropped.
    pub fn destroy(&mut self) {
        // wgpu resources are automatically cleaned up when dropped
    }
1369}
1370
/// Rendering tunables resolved once from the adapter's capabilities and from
/// environment-variable overrides (see `RenderingParameters::new`).
struct RenderingParameters {
    // MSAA sample count used for path rasterization: 4, 2, or 1 depending on
    // what the surface format supports.
    path_sample_count: u32,
    // Gamma-correction ratios for text, derived from the configured gamma.
    gamma_ratios: [f32; 4],
    // Extra contrast applied to grayscale-antialiased text (>= 0).
    grayscale_enhanced_contrast: f32,
    // Extra contrast applied to subpixel-antialiased text (>= 0).
    subpixel_enhanced_contrast: f32,
}
1377
1378impl RenderingParameters {
1379 fn new(adapter: &wgpu::Adapter, surface_format: wgpu::TextureFormat) -> Self {
1380 use std::env;
1381
1382 let format_features = adapter.get_texture_format_features(surface_format);
1383 let path_sample_count = [4, 2, 1]
1384 .into_iter()
1385 .find(|&n| format_features.flags.sample_count_supported(n))
1386 .unwrap_or(1);
1387
1388 let gamma = env::var("ZED_FONTS_GAMMA")
1389 .ok()
1390 .and_then(|v| v.parse().ok())
1391 .unwrap_or(1.8_f32)
1392 .clamp(1.0, 2.2);
1393 let gamma_ratios = get_gamma_correction_ratios(gamma);
1394
1395 let grayscale_enhanced_contrast = env::var("ZED_FONTS_GRAYSCALE_ENHANCED_CONTRAST")
1396 .ok()
1397 .and_then(|v| v.parse().ok())
1398 .unwrap_or(1.0_f32)
1399 .max(0.0);
1400
1401 let subpixel_enhanced_contrast = env::var("ZED_FONTS_SUBPIXEL_ENHANCED_CONTRAST")
1402 .ok()
1403 .and_then(|v| v.parse().ok())
1404 .unwrap_or(0.5_f32)
1405 .max(0.0);
1406
1407 Self {
1408 path_sample_count,
1409 gamma_ratios,
1410 grayscale_enhanced_contrast,
1411 subpixel_enhanced_contrast,
1412 }
1413 }
1414}