// Doing `if let` gives you nice scoping with passes/encoders
#![allow(irrefutable_let_patterns)]

use super::{BladeAtlas, BladeContext};
use crate::{
    Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point, PolychromeSprite,
    PrimitiveBatch, Quad, ScaledPixels, Scene, Shadow, Size, Underline,
};
use blade_graphics as gpu;
use blade_util::{BufferBelt, BufferBeltDescriptor};
use bytemuck::{Pod, Zeroable};
#[cfg(target_os = "macos")]
use media::core_video::CVMetalTextureCache;
use std::sync::Arc;

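// Timeout (in milliseconds) used when waiting on a GPU sync point; see `wait_for_gpu`,
// which reports a GPU hang if this elapses.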
const MAX_FRAME_TIME_MS: u32 = 10000;

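// Per-frame globals bound by every pipeline; kept in sync with `GlobalParams` in shaders.wgsl
// (see the `check_struct_size` calls below).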
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalParams {
    viewport_size: [f32; 2],
    premultiplied_alpha: u32,
    pad: u32,
}

// Note: we can't use `Bounds` directly here because
// it doesn't implement Pod + Zeroable
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct PodBounds {
    origin: [f32; 2],
    size: [f32; 2],
}

impl From<Bounds<ScaledPixels>> for PodBounds {
    fn from(bounds: Bounds<ScaledPixels>) -> Self {
        Self {
            origin: [bounds.origin.x.0, bounds.origin.y.0],
            size: [bounds.size.width.0, bounds.size.height.0],
        }
    }
}

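// Per-draw parameters for the video surface pipeline; kept in sync with `SurfaceParams` in shaders.wgsl.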
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct SurfaceParams {
    bounds: PodBounds,
    content_mask: PodBounds,
}

#[derive(blade_macros::ShaderData)]
struct ShaderQuadsData {
    globals: GlobalParams,
    b_quads: gpu::BufferPiece,
}

#[derive(blade_macros::ShaderData)]
struct ShaderShadowsData {
    globals: GlobalParams,
    b_shadows: gpu::BufferPiece,
}

#[derive(blade_macros::ShaderData)]
struct ShaderPathRasterizationData {
    globals: GlobalParams,
    b_path_vertices: gpu::BufferPiece,
}

#[derive(blade_macros::ShaderData)]
struct ShaderPathsData {
    globals: GlobalParams,
    t_sprite: gpu::TextureView,
    s_sprite: gpu::Sampler,
    b_path_sprites: gpu::BufferPiece,
}

#[derive(blade_macros::ShaderData)]
struct ShaderUnderlinesData {
    globals: GlobalParams,
    b_underlines: gpu::BufferPiece,
}

#[derive(blade_macros::ShaderData)]
struct ShaderMonoSpritesData {
    globals: GlobalParams,
    t_sprite: gpu::TextureView,
    s_sprite: gpu::Sampler,
    b_mono_sprites: gpu::BufferPiece,
}

#[derive(blade_macros::ShaderData)]
struct ShaderPolySpritesData {
    globals: GlobalParams,
    t_sprite: gpu::TextureView,
    s_sprite: gpu::Sampler,
    b_poly_sprites: gpu::BufferPiece,
}

#[derive(blade_macros::ShaderData)]
struct ShaderSurfacesData {
    globals: GlobalParams,
    surface_locals: SurfaceParams,
    t_y: gpu::TextureView,
    t_cb_cr: gpu::TextureView,
    s_surface: gpu::Sampler,
}

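// Instance data used when compositing rasterized paths from the intermediate texture onto the
// frame; kept in sync with `PathSprite` in shaders.wgsl.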
#[derive(Clone, Debug, Eq, PartialEq)]
#[repr(C)]
struct PathSprite {
    bounds: Bounds<ScaledPixels>,
}

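// Vertex data for offscreen path rasterization; kept in sync with `PathRasterizationVertex`
// in shaders.wgsl.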
#[derive(Clone, Debug)]
#[repr(C)]
struct PathRasterizationVertex {
    xy_position: Point<ScaledPixels>,
    st_position: Point<f32>,
    color: Background,
    bounds: Bounds<ScaledPixels>,
}

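// One render pipeline per primitive kind, all compiled from the same WGSL module.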
struct BladePipelines {
    quads: gpu::RenderPipeline,
    shadows: gpu::RenderPipeline,
    path_rasterization: gpu::RenderPipeline,
    paths: gpu::RenderPipeline,
    underlines: gpu::RenderPipeline,
    mono_sprites: gpu::RenderPipeline,
    poly_sprites: gpu::RenderPipeline,
    surfaces: gpu::RenderPipeline,
}

impl BladePipelines {
    fn new(gpu: &gpu::Context, surface_info: gpu::SurfaceInfo, path_sample_count: u32) -> Self {
        use gpu::ShaderData as _;

        log::info!(
            "Initializing Blade pipelines for surface {:?}",
            surface_info
        );
        let shader = gpu.create_shader(gpu::ShaderDesc {
            source: include_str!("shaders.wgsl"),
        });
        shader.check_struct_size::<GlobalParams>();
        shader.check_struct_size::<SurfaceParams>();
        shader.check_struct_size::<Quad>();
        shader.check_struct_size::<Shadow>();
        shader.check_struct_size::<PathRasterizationVertex>();
        shader.check_struct_size::<PathSprite>();
        shader.check_struct_size::<Underline>();
        shader.check_struct_size::<MonochromeSprite>();
        shader.check_struct_size::<PolychromeSprite>();

        // See https://apoorvaj.io/alpha-compositing-opengl-blending-and-premultiplied-alpha/
        let blend_mode = match surface_info.alpha {
            gpu::AlphaMode::Ignored => gpu::BlendState::ALPHA_BLENDING,
            gpu::AlphaMode::PreMultiplied => gpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING,
            gpu::AlphaMode::PostMultiplied => gpu::BlendState::ALPHA_BLENDING,
        };
        let color_targets = &[gpu::ColorTargetState {
            format: surface_info.format,
            blend: Some(blend_mode),
            write_mask: gpu::ColorWrites::default(),
        }];

        Self {
            quads: gpu.create_render_pipeline(gpu::RenderPipelineDesc {
                name: "quads",
                data_layouts: &[&ShaderQuadsData::layout()],
                vertex: shader.at("vs_quad"),
                vertex_fetches: &[],
                primitive: gpu::PrimitiveState {
                    topology: gpu::PrimitiveTopology::TriangleStrip,
                    ..Default::default()
                },
                depth_stencil: None,
                fragment: Some(shader.at("fs_quad")),
                color_targets,
                multisample_state: gpu::MultisampleState::default(),
            }),
            shadows: gpu.create_render_pipeline(gpu::RenderPipelineDesc {
                name: "shadows",
                data_layouts: &[&ShaderShadowsData::layout()],
                vertex: shader.at("vs_shadow"),
                vertex_fetches: &[],
                primitive: gpu::PrimitiveState {
                    topology: gpu::PrimitiveTopology::TriangleStrip,
                    ..Default::default()
                },
                depth_stencil: None,
                fragment: Some(shader.at("fs_shadow")),
                color_targets,
                multisample_state: gpu::MultisampleState::default(),
            }),
            path_rasterization: gpu.create_render_pipeline(gpu::RenderPipelineDesc {
                name: "path_rasterization",
                data_layouts: &[&ShaderPathRasterizationData::layout()],
                vertex: shader.at("vs_path_rasterization"),
                vertex_fetches: &[],
                primitive: gpu::PrimitiveState {
                    topology: gpu::PrimitiveTopology::TriangleList,
                    ..Default::default()
                },
                depth_stencil: None,
                fragment: Some(shader.at("fs_path_rasterization")),
                // The original implementation used the ADDITIVE blend mode here,
                // for reasons that are unclear:
                // color_targets: &[gpu::ColorTargetState {
                //     format: PATH_TEXTURE_FORMAT,
                //     blend: Some(gpu::BlendState::ADDITIVE),
                //     write_mask: gpu::ColorWrites::default(),
                // }],
                color_targets: &[gpu::ColorTargetState {
                    format: surface_info.format,
                    blend: Some(gpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING),
                    write_mask: gpu::ColorWrites::default(),
                }],
                multisample_state: gpu::MultisampleState {
                    sample_count: path_sample_count,
                    ..Default::default()
                },
            }),
            paths: gpu.create_render_pipeline(gpu::RenderPipelineDesc {
                name: "paths",
                data_layouts: &[&ShaderPathsData::layout()],
                vertex: shader.at("vs_path"),
                vertex_fetches: &[],
                primitive: gpu::PrimitiveState {
                    topology: gpu::PrimitiveTopology::TriangleStrip,
                    ..Default::default()
                },
                depth_stencil: None,
                fragment: Some(shader.at("fs_path")),
                color_targets: &[gpu::ColorTargetState {
                    format: surface_info.format,
                    blend: Some(gpu::BlendState {
                        color: gpu::BlendComponent::OVER,
                        alpha: gpu::BlendComponent::ADDITIVE,
                    }),
                    write_mask: gpu::ColorWrites::default(),
                }],
                multisample_state: gpu::MultisampleState::default(),
            }),
            underlines: gpu.create_render_pipeline(gpu::RenderPipelineDesc {
                name: "underlines",
                data_layouts: &[&ShaderUnderlinesData::layout()],
                vertex: shader.at("vs_underline"),
                vertex_fetches: &[],
                primitive: gpu::PrimitiveState {
                    topology: gpu::PrimitiveTopology::TriangleStrip,
                    ..Default::default()
                },
                depth_stencil: None,
                fragment: Some(shader.at("fs_underline")),
                color_targets,
                multisample_state: gpu::MultisampleState::default(),
            }),
            mono_sprites: gpu.create_render_pipeline(gpu::RenderPipelineDesc {
                name: "mono-sprites",
                data_layouts: &[&ShaderMonoSpritesData::layout()],
                vertex: shader.at("vs_mono_sprite"),
                vertex_fetches: &[],
                primitive: gpu::PrimitiveState {
                    topology: gpu::PrimitiveTopology::TriangleStrip,
                    ..Default::default()
                },
                depth_stencil: None,
                fragment: Some(shader.at("fs_mono_sprite")),
                color_targets,
                multisample_state: gpu::MultisampleState::default(),
            }),
            poly_sprites: gpu.create_render_pipeline(gpu::RenderPipelineDesc {
                name: "poly-sprites",
                data_layouts: &[&ShaderPolySpritesData::layout()],
                vertex: shader.at("vs_poly_sprite"),
                vertex_fetches: &[],
                primitive: gpu::PrimitiveState {
                    topology: gpu::PrimitiveTopology::TriangleStrip,
                    ..Default::default()
                },
                depth_stencil: None,
                fragment: Some(shader.at("fs_poly_sprite")),
                color_targets,
                multisample_state: gpu::MultisampleState::default(),
            }),
            surfaces: gpu.create_render_pipeline(gpu::RenderPipelineDesc {
                name: "surfaces",
                data_layouts: &[&ShaderSurfacesData::layout()],
                vertex: shader.at("vs_surface"),
                vertex_fetches: &[],
                primitive: gpu::PrimitiveState {
                    topology: gpu::PrimitiveTopology::TriangleStrip,
                    ..Default::default()
                },
                depth_stencil: None,
                fragment: Some(shader.at("fs_surface")),
                color_targets,
                multisample_state: gpu::MultisampleState::default(),
            }),
        }
    }

    fn destroy(&mut self, gpu: &gpu::Context) {
        gpu.destroy_render_pipeline(&mut self.quads);
        gpu.destroy_render_pipeline(&mut self.shadows);
        gpu.destroy_render_pipeline(&mut self.path_rasterization);
        gpu.destroy_render_pipeline(&mut self.paths);
        gpu.destroy_render_pipeline(&mut self.underlines);
        gpu.destroy_render_pipeline(&mut self.mono_sprites);
        gpu.destroy_render_pipeline(&mut self.poly_sprites);
        gpu.destroy_render_pipeline(&mut self.surfaces);
    }
}

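/// Initial configuration for a window's presentation surface.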
pub struct BladeSurfaceConfig {
    pub size: gpu::Extent,
    pub transparent: bool,
}

// Note: some of these fields could move into `BladeContext` so that they are
// shared between windows (e.g. `pipelines`), but that is complicated by the
// fact that the pipelines depend on the surface format and alpha mode.
pub struct BladeRenderer {
    gpu: Arc<gpu::Context>,
    surface: gpu::Surface,
    surface_config: gpu::SurfaceConfig,
    command_encoder: gpu::CommandEncoder,
    last_sync_point: Option<gpu::SyncPoint>,
    pipelines: BladePipelines,
    instance_belt: BufferBelt,
    atlas: Arc<BladeAtlas>,
    atlas_sampler: gpu::Sampler,
    #[cfg(target_os = "macos")]
    core_video_texture_cache: CVMetalTextureCache,
    path_sample_count: u32,
    path_intermediate_texture: gpu::Texture,
    path_intermediate_texture_view: gpu::TextureView,
    path_intermediate_msaa_texture: Option<gpu::Texture>,
    path_intermediate_msaa_texture_view: Option<gpu::TextureView>,
}

impl BladeRenderer {
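    /// Creates a renderer for the given window: configures the presentation surface,
    /// picks a path MSAA sample count, and builds all pipelines and intermediate targets.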
    pub fn new<I: raw_window_handle::HasWindowHandle + raw_window_handle::HasDisplayHandle>(
        context: &BladeContext,
        window: &I,
        config: BladeSurfaceConfig,
    ) -> anyhow::Result<Self> {
        let surface_config = gpu::SurfaceConfig {
            size: config.size,
            usage: gpu::TextureUsage::TARGET,
            display_sync: gpu::DisplaySync::Recent,
            color_space: gpu::ColorSpace::Linear,
            allow_exclusive_full_screen: false,
            transparent: config.transparent,
        };
        let surface = context
            .gpu
            .create_surface_configured(window, surface_config)
            .map_err(|err| anyhow::anyhow!("Failed to create surface: {err:?}"))?;

        let command_encoder = context.gpu.create_command_encoder(gpu::CommandEncoderDesc {
            name: "main",
            buffer_count: 2,
        });
        // workaround for https://github.com/zed-industries/zed/issues/26143
        let path_sample_count = std::env::var("ZED_PATH_SAMPLE_COUNT")
            .ok()
            .and_then(|v| v.parse().ok())
            .or_else(|| {
                [4, 2, 1]
                    .into_iter()
                    .find(|&n| (context.gpu.capabilities().sample_count_mask & n) != 0)
            })
            .unwrap_or(1);
        let pipelines = BladePipelines::new(&context.gpu, surface.info(), path_sample_count);
        let instance_belt = BufferBelt::new(BufferBeltDescriptor {
            memory: gpu::Memory::Shared,
            min_chunk_size: 0x1000,
            alignment: 0x40, // Vulkan `minStorageBufferOffsetAlignment` on Intel Xe
        });
        let atlas = Arc::new(BladeAtlas::new(&context.gpu));
        let atlas_sampler = context.gpu.create_sampler(gpu::SamplerDesc {
            name: "path rasterization sampler",
            mag_filter: gpu::FilterMode::Linear,
            min_filter: gpu::FilterMode::Linear,
            ..Default::default()
        });

        let (path_intermediate_texture, path_intermediate_texture_view) =
            create_path_intermediate_texture(
                &context.gpu,
                surface.info().format,
                config.size.width,
                config.size.height,
            );
        let (path_intermediate_msaa_texture, path_intermediate_msaa_texture_view) =
            create_msaa_texture_if_needed(
                &context.gpu,
                surface.info().format,
                config.size.width,
                config.size.height,
                path_sample_count,
            )
            .unzip();

        #[cfg(target_os = "macos")]
        let core_video_texture_cache = unsafe {
            CVMetalTextureCache::new(
                objc2::rc::Retained::as_ptr(&context.gpu.metal_device()) as *mut _
            )
            .unwrap()
        };

        Ok(Self {
            gpu: Arc::clone(&context.gpu),
            surface,
            surface_config,
            command_encoder,
            last_sync_point: None,
            pipelines,
            instance_belt,
            atlas,
            atlas_sampler,
            #[cfg(target_os = "macos")]
            core_video_texture_cache,
            path_sample_count,
            path_intermediate_texture,
            path_intermediate_texture_view,
            path_intermediate_msaa_texture,
            path_intermediate_msaa_texture_view,
        })
    }

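    /// Blocks until the previously submitted work has completed, logging diagnostics if the GPU
    /// appears hung.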
    fn wait_for_gpu(&mut self) {
        if let Some(last_sp) = self.last_sync_point.take()
            && !self.gpu.wait_for(&last_sp, MAX_FRAME_TIME_MS)
        {
            log::error!("GPU hung");
            #[cfg(target_os = "linux")]
            if self.gpu.device_information().driver_name == "radv" {
                log::error!(
                    "there's a known bug with amdgpu/radv, try setting ZED_PATH_SAMPLE_COUNT=0 as a workaround"
                );
                log::error!(
                    "if that helps you're running into https://github.com/zed-industries/zed/issues/26143"
                );
            }
            log::error!(
                "your device information is: {:?}",
                self.gpu.device_information()
            );
            while !self.gpu.wait_for(&last_sp, MAX_FRAME_TIME_MS) {}
        }
    }

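    /// Resizes the presentation surface and the path intermediate targets if the drawable size
    /// has changed.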
    pub fn update_drawable_size(&mut self, size: Size<DevicePixels>) {
        self.update_drawable_size_impl(size, false);
    }

    /// Like `update_drawable_size` but skips the check that the size has changed. This is useful in
    /// cases like restoring a window from minimization where the size is the same but the
    /// renderer's swap chain needs to be recreated.
    #[cfg_attr(
        any(target_os = "macos", target_os = "linux", target_os = "freebsd"),
        allow(dead_code)
    )]
    pub fn update_drawable_size_even_if_unchanged(&mut self, size: Size<DevicePixels>) {
        self.update_drawable_size_impl(size, true);
    }

    fn update_drawable_size_impl(&mut self, size: Size<DevicePixels>, always_resize: bool) {
        let gpu_size = gpu::Extent {
            width: size.width.0 as u32,
            height: size.height.0 as u32,
            depth: 1,
        };

        if always_resize || gpu_size != self.surface_config.size {
            self.wait_for_gpu();
            self.surface_config.size = gpu_size;
            self.gpu
                .reconfigure_surface(&mut self.surface, self.surface_config);
            self.gpu.destroy_texture(self.path_intermediate_texture);
            self.gpu
                .destroy_texture_view(self.path_intermediate_texture_view);
            if let Some(msaa_texture) = self.path_intermediate_msaa_texture {
                self.gpu.destroy_texture(msaa_texture);
            }
            if let Some(msaa_view) = self.path_intermediate_msaa_texture_view {
                self.gpu.destroy_texture_view(msaa_view);
            }
            let (path_intermediate_texture, path_intermediate_texture_view) =
                create_path_intermediate_texture(
                    &self.gpu,
                    self.surface.info().format,
                    gpu_size.width,
                    gpu_size.height,
                );
            self.path_intermediate_texture = path_intermediate_texture;
            self.path_intermediate_texture_view = path_intermediate_texture_view;
            let (path_intermediate_msaa_texture, path_intermediate_msaa_texture_view) =
                create_msaa_texture_if_needed(
                    &self.gpu,
                    self.surface.info().format,
                    gpu_size.width,
                    gpu_size.height,
                    self.path_sample_count,
                )
                .unzip();
            self.path_intermediate_msaa_texture = path_intermediate_msaa_texture;
            self.path_intermediate_msaa_texture_view = path_intermediate_msaa_texture_view;
        }
    }

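    /// Reconfigures the surface and rebuilds the pipelines when window transparency changes,
    /// since the blend state depends on the surface alpha mode.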
    pub fn update_transparency(&mut self, transparent: bool) {
        if transparent != self.surface_config.transparent {
            self.wait_for_gpu();
            self.surface_config.transparent = transparent;
            self.gpu
                .reconfigure_surface(&mut self.surface, self.surface_config);
            self.pipelines.destroy(&self.gpu);
            self.pipelines =
                BladePipelines::new(&self.gpu, self.surface.info(), self.path_sample_count);
        }
    }

    #[cfg_attr(
        any(target_os = "macos", feature = "wayland", target_os = "windows"),
        allow(dead_code)
    )]
    pub fn viewport_size(&self) -> gpu::Extent {
        self.surface_config.size
    }

    pub fn sprite_atlas(&self) -> &Arc<BladeAtlas> {
        &self.atlas
    }

    #[cfg_attr(target_os = "macos", allow(dead_code))]
    pub fn gpu_specs(&self) -> GpuSpecs {
        let info = self.gpu.device_information();

        GpuSpecs {
            is_software_emulated: info.is_software_emulated,
            device_name: info.device_name.clone(),
            driver_name: info.driver_name.clone(),
            driver_info: info.driver_info.clone(),
        }
    }

    #[cfg(target_os = "macos")]
    pub fn layer(&self) -> metal::MetalLayer {
        unsafe { foreign_types::ForeignType::from_ptr(self.layer_ptr()) }
    }

    #[cfg(target_os = "macos")]
    pub fn layer_ptr(&self) -> *mut metal::CAMetalLayer {
        objc2::rc::Retained::as_ptr(&self.surface.metal_layer()) as *mut _
    }

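    /// Rasterizes all path vertices into the offscreen intermediate texture (resolving MSAA when
    /// enabled), so the main pass can later composite them onto the frame.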
    #[profiling::function]
    fn draw_paths_to_intermediate(
        &mut self,
        paths: &[Path<ScaledPixels>],
        width: f32,
        height: f32,
    ) {
        self.command_encoder
            .init_texture(self.path_intermediate_texture);
        if let Some(msaa_texture) = self.path_intermediate_msaa_texture {
            self.command_encoder.init_texture(msaa_texture);
        }

        let target = if let Some(msaa_view) = self.path_intermediate_msaa_texture_view {
            gpu::RenderTarget {
                view: msaa_view,
                init_op: gpu::InitOp::Clear(gpu::TextureColor::TransparentBlack),
                finish_op: gpu::FinishOp::ResolveTo(self.path_intermediate_texture_view),
            }
        } else {
            gpu::RenderTarget {
                view: self.path_intermediate_texture_view,
                init_op: gpu::InitOp::Clear(gpu::TextureColor::TransparentBlack),
                finish_op: gpu::FinishOp::Store,
            }
        };
        if let mut pass = self.command_encoder.render(
            "rasterize paths",
            gpu::RenderTargetSet {
                colors: &[target],
                depth_stencil: None,
            },
        ) {
            let globals = GlobalParams {
                viewport_size: [width, height],
                premultiplied_alpha: 0,
                pad: 0,
            };
            let mut encoder = pass.with(&self.pipelines.path_rasterization);

            let mut vertices = Vec::new();
            for path in paths {
                vertices.extend(path.vertices.iter().map(|v| PathRasterizationVertex {
                    xy_position: v.xy_position,
                    st_position: v.st_position,
                    color: path.color,
                    bounds: path.clipped_bounds(),
                }));
            }
            let vertex_buf = unsafe { self.instance_belt.alloc_typed(&vertices, &self.gpu) };
            encoder.bind(
                0,
                &ShaderPathRasterizationData {
                    globals,
                    b_path_vertices: vertex_buf,
                },
            );
            encoder.draw(0, vertices.len() as u32, 0, 1);
        }
    }

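    /// Releases all GPU resources owned by this renderer.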
    pub fn destroy(&mut self) {
        self.wait_for_gpu();
        self.atlas.destroy();
        self.gpu.destroy_sampler(self.atlas_sampler);
        self.instance_belt.destroy(&self.gpu);
        self.gpu.destroy_command_encoder(&mut self.command_encoder);
        self.pipelines.destroy(&self.gpu);
        self.gpu.destroy_surface(&mut self.surface);
        self.gpu.destroy_texture(self.path_intermediate_texture);
        self.gpu
            .destroy_texture_view(self.path_intermediate_texture_view);
        if let Some(msaa_texture) = self.path_intermediate_msaa_texture {
            self.gpu.destroy_texture(msaa_texture);
        }
        if let Some(msaa_view) = self.path_intermediate_msaa_texture_view {
            self.gpu.destroy_texture_view(msaa_view);
        }
    }

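    /// Records and submits a full frame: acquires the swapchain texture, encodes one draw per
    /// primitive batch (suspending the main pass around offscreen path rasterization), then
    /// presents.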
    pub fn draw(&mut self, scene: &Scene) {
        self.command_encoder.start();
        self.atlas.before_frame(&mut self.command_encoder);

        let frame = {
            profiling::scope!("acquire frame");
            self.surface.acquire_frame()
        };
        self.command_encoder.init_texture(frame.texture());

        let globals = GlobalParams {
            viewport_size: [
                self.surface_config.size.width as f32,
                self.surface_config.size.height as f32,
            ],
            premultiplied_alpha: match self.surface.info().alpha {
                gpu::AlphaMode::Ignored | gpu::AlphaMode::PostMultiplied => 0,
                gpu::AlphaMode::PreMultiplied => 1,
            },
            pad: 0,
        };

        let mut pass = self.command_encoder.render(
            "main",
            gpu::RenderTargetSet {
                colors: &[gpu::RenderTarget {
                    view: frame.texture_view(),
                    init_op: gpu::InitOp::Clear(gpu::TextureColor::TransparentBlack),
                    finish_op: gpu::FinishOp::Store,
                }],
                depth_stencil: None,
            },
        );

        profiling::scope!("render pass");
        for batch in scene.batches() {
            match batch {
                PrimitiveBatch::Quads(quads) => {
                    let instance_buf = unsafe { self.instance_belt.alloc_typed(quads, &self.gpu) };
                    let mut encoder = pass.with(&self.pipelines.quads);
                    encoder.bind(
                        0,
                        &ShaderQuadsData {
                            globals,
                            b_quads: instance_buf,
                        },
                    );
                    encoder.draw(0, 4, 0, quads.len() as u32);
                }
                PrimitiveBatch::Shadows(shadows) => {
                    let instance_buf =
                        unsafe { self.instance_belt.alloc_typed(shadows, &self.gpu) };
                    let mut encoder = pass.with(&self.pipelines.shadows);
                    encoder.bind(
                        0,
                        &ShaderShadowsData {
                            globals,
                            b_shadows: instance_buf,
                        },
                    );
                    encoder.draw(0, 4, 0, shadows.len() as u32);
                }
                PrimitiveBatch::Paths(paths) => {
                    let Some(first_path) = paths.first() else {
                        continue;
                    };
                    drop(pass);
                    self.draw_paths_to_intermediate(
                        paths,
                        self.surface_config.size.width as f32,
                        self.surface_config.size.height as f32,
                    );
                    pass = self.command_encoder.render(
                        "main",
                        gpu::RenderTargetSet {
                            colors: &[gpu::RenderTarget {
                                view: frame.texture_view(),
                                init_op: gpu::InitOp::Load,
                                finish_op: gpu::FinishOp::Store,
                            }],
                            depth_stencil: None,
                        },
                    );
                    let mut encoder = pass.with(&self.pipelines.paths);
                    // When copying paths from the intermediate texture to the drawable,
                    // each pixel must only be copied once, in case of transparent paths.
                    //
                    // If all paths have the same draw order, then their bounds are all
                    // disjoint, so we can copy each path's bounds individually. If this
                    // batch combines different draw orders, we perform a single copy
                    // for a minimal spanning rect.
                    let sprites = if paths.last().unwrap().order == first_path.order {
                        paths
                            .iter()
                            .map(|path| PathSprite {
                                bounds: path.clipped_bounds(),
                            })
                            .collect()
                    } else {
                        let mut bounds = first_path.clipped_bounds();
                        for path in paths.iter().skip(1) {
                            bounds = bounds.union(&path.clipped_bounds());
                        }
                        vec![PathSprite { bounds }]
                    };
                    let instance_buf =
                        unsafe { self.instance_belt.alloc_typed(&sprites, &self.gpu) };
                    encoder.bind(
                        0,
                        &ShaderPathsData {
                            globals,
                            t_sprite: self.path_intermediate_texture_view,
                            s_sprite: self.atlas_sampler,
                            b_path_sprites: instance_buf,
                        },
                    );
                    encoder.draw(0, 4, 0, sprites.len() as u32);
                }
                PrimitiveBatch::Underlines(underlines) => {
                    let instance_buf =
                        unsafe { self.instance_belt.alloc_typed(underlines, &self.gpu) };
                    let mut encoder = pass.with(&self.pipelines.underlines);
                    encoder.bind(
                        0,
                        &ShaderUnderlinesData {
                            globals,
                            b_underlines: instance_buf,
                        },
                    );
                    encoder.draw(0, 4, 0, underlines.len() as u32);
                }
                PrimitiveBatch::MonochromeSprites {
                    texture_id,
                    sprites,
                } => {
                    let tex_info = self.atlas.get_texture_info(texture_id);
                    let instance_buf =
                        unsafe { self.instance_belt.alloc_typed(sprites, &self.gpu) };
                    let mut encoder = pass.with(&self.pipelines.mono_sprites);
                    encoder.bind(
                        0,
                        &ShaderMonoSpritesData {
                            globals,
                            t_sprite: tex_info.raw_view,
                            s_sprite: self.atlas_sampler,
                            b_mono_sprites: instance_buf,
                        },
                    );
                    encoder.draw(0, 4, 0, sprites.len() as u32);
                }
                PrimitiveBatch::PolychromeSprites {
                    texture_id,
                    sprites,
                } => {
                    let tex_info = self.atlas.get_texture_info(texture_id);
                    let instance_buf =
                        unsafe { self.instance_belt.alloc_typed(sprites, &self.gpu) };
                    let mut encoder = pass.with(&self.pipelines.poly_sprites);
                    encoder.bind(
                        0,
                        &ShaderPolySpritesData {
                            globals,
                            t_sprite: tex_info.raw_view,
                            s_sprite: self.atlas_sampler,
                            b_poly_sprites: instance_buf,
                        },
                    );
                    encoder.draw(0, 4, 0, sprites.len() as u32);
                }
                PrimitiveBatch::Surfaces(surfaces) => {
                    let mut _encoder = pass.with(&self.pipelines.surfaces);

                    for surface in surfaces {
                        #[cfg(not(target_os = "macos"))]
                        {
                            let _ = surface;
                            continue;
                        };

                        #[cfg(target_os = "macos")]
                        {
                            let (t_y, t_cb_cr) = unsafe {
                                use core_foundation::base::TCFType as _;
                                use std::ptr;

                                assert_eq!(
                                    surface.image_buffer.get_pixel_format(),
                                    core_video::pixel_buffer::kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
                                );

                                let y_texture = self
                                    .core_video_texture_cache
                                    .create_texture_from_image(
                                        surface.image_buffer.as_concrete_TypeRef(),
                                        ptr::null(),
                                        metal::MTLPixelFormat::R8Unorm,
                                        surface.image_buffer.get_width_of_plane(0),
                                        surface.image_buffer.get_height_of_plane(0),
                                        0,
                                    )
                                    .unwrap();
                                let cb_cr_texture = self
                                    .core_video_texture_cache
                                    .create_texture_from_image(
                                        surface.image_buffer.as_concrete_TypeRef(),
                                        ptr::null(),
                                        metal::MTLPixelFormat::RG8Unorm,
                                        surface.image_buffer.get_width_of_plane(1),
                                        surface.image_buffer.get_height_of_plane(1),
                                        1,
                                    )
                                    .unwrap();
                                (
                                    gpu::TextureView::from_metal_texture(
                                        &objc2::rc::Retained::retain(
                                            foreign_types::ForeignTypeRef::as_ptr(
                                                y_texture.as_texture_ref(),
                                            )
                                                as *mut objc2::runtime::ProtocolObject<
                                                    dyn objc2_metal::MTLTexture,
                                                >,
                                        )
                                        .unwrap(),
                                        gpu::TexelAspects::COLOR,
                                    ),
                                    gpu::TextureView::from_metal_texture(
                                        &objc2::rc::Retained::retain(
                                            foreign_types::ForeignTypeRef::as_ptr(
                                                cb_cr_texture.as_texture_ref(),
                                            )
                                                as *mut objc2::runtime::ProtocolObject<
                                                    dyn objc2_metal::MTLTexture,
                                                >,
                                        )
                                        .unwrap(),
                                        gpu::TexelAspects::COLOR,
                                    ),
                                )
                            };

                            _encoder.bind(
                                0,
                                &ShaderSurfacesData {
                                    globals,
                                    surface_locals: SurfaceParams {
                                        bounds: surface.bounds.into(),
                                        content_mask: surface.content_mask.bounds.into(),
                                    },
                                    t_y,
                                    t_cb_cr,
                                    s_surface: self.atlas_sampler,
                                },
                            );

                            _encoder.draw(0, 4, 0, 1);
                        }
                    }
                }
            }
        }
        drop(pass);

        self.command_encoder.present(frame);
        let sync_point = self.gpu.submit(&mut self.command_encoder);

        profiling::scope!("finish");
        self.instance_belt.flush(&sync_point);
        self.atlas.after_frame(&sync_point);

        self.wait_for_gpu();
        self.last_sync_point = Some(sync_point);
    }
}

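/// Creates the single-sample offscreen texture (and view) that paths are rasterized into before
/// being composited onto the frame.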
fn create_path_intermediate_texture(
    gpu: &gpu::Context,
    format: gpu::TextureFormat,
    width: u32,
    height: u32,
) -> (gpu::Texture, gpu::TextureView) {
    let texture = gpu.create_texture(gpu::TextureDesc {
        name: "path intermediate",
        format,
        size: gpu::Extent {
            width,
            height,
            depth: 1,
        },
        array_layer_count: 1,
        mip_level_count: 1,
        sample_count: 1,
        dimension: gpu::TextureDimension::D2,
        usage: gpu::TextureUsage::COPY | gpu::TextureUsage::RESOURCE | gpu::TextureUsage::TARGET,
        external: None,
    });
    let texture_view = gpu.create_texture_view(
        texture,
        gpu::TextureViewDesc {
            name: "path intermediate view",
            format,
            dimension: gpu::ViewDimension::D2,
            subresources: &Default::default(),
        },
    );
    (texture, texture_view)
}

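/// Creates an MSAA color target (and view) for path rasterization, or returns `None` when
/// `sample_count` is 1 and multisampling is not in use.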
fn create_msaa_texture_if_needed(
    gpu: &gpu::Context,
    format: gpu::TextureFormat,
    width: u32,
    height: u32,
    sample_count: u32,
) -> Option<(gpu::Texture, gpu::TextureView)> {
    if sample_count <= 1 {
        return None;
    }
    let texture_msaa = gpu.create_texture(gpu::TextureDesc {
        name: "path intermediate msaa",
        format,
        size: gpu::Extent {
            width,
            height,
            depth: 1,
        },
        array_layer_count: 1,
        mip_level_count: 1,
        sample_count,
        dimension: gpu::TextureDimension::D2,
        usage: gpu::TextureUsage::TARGET,
        external: None,
    });
    let texture_view_msaa = gpu.create_texture_view(
        texture_msaa,
        gpu::TextureViewDesc {
            name: "path intermediate msaa view",
            format,
            dimension: gpu::ViewDimension::D2,
            subresources: &Default::default(),
        },
    );

    Some((texture_msaa, texture_view_msaa))
}