1use super::sprite_cache::SpriteCache;
2use crate::{
3 color::ColorU,
4 geometry::{
5 rect::RectF,
6 vector::{vec2f, vec2i, Vector2F, Vector2I},
7 },
8 platform,
9 scene::Layer,
10 Scene,
11};
12use anyhow::{anyhow, Result};
13use cocoa::foundation::NSUInteger;
14use metal::{MTLResourceOptions, NSRange};
15use shaders::{ToFloat2 as _, ToUchar4 as _};
16use std::{collections::HashMap, ffi::c_void, mem, sync::Arc};
17
18const SHADERS_METALLIB: &'static [u8] =
19 include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
20const INSTANCE_BUFFER_SIZE: usize = 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value.
21
/// Per-frame state threaded through the individual draw methods.
struct RenderContext<'a> {
    // Size of the output drawable in device pixels (divided by the scene's
    // scale factor where logical units are needed — see `Renderer::clip`).
    drawable_size: Vector2F,
    // Encoder that all of this frame's draw calls are recorded into.
    command_encoder: &'a metal::RenderCommandEncoderRef,
}
26
/// Metal-backed renderer for a `Scene`: draws each layer's shadows, quads and
/// glyph sprites using instanced draws fed from a single shared buffer.
pub struct Renderer {
    // Rasterizes glyphs into atlas textures and hands back per-glyph sprites.
    sprite_cache: SpriteCache,
    // One pipeline state per primitive kind, all built from the embedded
    // shader library in `Renderer::new`.
    quad_pipeline_state: metal::RenderPipelineState,
    shadow_pipeline_state: metal::RenderPipelineState,
    sprite_pipeline_state: metal::RenderPipelineState,
    // Six vertices covering the unit square; instance shaders scale/translate
    // them to each primitive's bounds.
    unit_vertices: metal::Buffer,
    // Shared per-instance data buffer; draw methods claim 256-byte-aligned
    // sub-ranges of it, tracked by the `offset` cursor in `render`.
    instances: metal::Buffer,
    // Target texture for path rasterization. NOTE(review): `render_paths` is
    // currently an empty stub, so nothing in this file writes to it yet.
    paths_texture: metal::Texture,
}
36
impl Renderer {
    /// Builds a renderer for `device`: compiles the embedded shader library,
    /// creates one pipeline state per primitive kind (quad, shadow, sprite),
    /// and allocates the shared vertex/instance buffers and the path texture.
    ///
    /// Returns an error if the shader library or any pipeline state fails to
    /// build.
    pub fn new(
        device: metal::Device,
        pixel_format: metal::MTLPixelFormat,
        fonts: Arc<dyn platform::FontSystem>,
    ) -> Result<Self> {
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .map_err(|message| anyhow!("error building metal library: {}", message))?;

        // Two triangles covering the unit square; the instance shaders scale
        // and translate these to each primitive's bounds.
        let unit_vertices = [
            (0., 0.).to_float2(),
            (1., 0.).to_float2(),
            (0., 1.).to_float2(),
            (0., 1.).to_float2(),
            (1., 0.).to_float2(),
            (1., 1.).to_float2(),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<shaders::vector_float2>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        // Managed storage: the CPU writes instance data directly into this
        // buffer each frame and flushes it with `did_modify_range` (see the
        // render_* methods below).
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        // Single-channel (alpha-only) texture reserved for path rendering.
        // NOTE(review): `render_paths` is an unimplemented stub, so this
        // texture is allocated but not yet used.
        let paths_texture_size = vec2f(2048., 2048.);
        let descriptor = metal::TextureDescriptor::new();
        descriptor.set_pixel_format(metal::MTLPixelFormat::A8Unorm);
        descriptor.set_width(paths_texture_size.x() as u64);
        descriptor.set_height(paths_texture_size.y() as u64);
        let paths_texture = device.new_texture(&descriptor);

        let atlas_size: Vector2I = vec2i(1024, 768);
        Ok(Self {
            sprite_cache: SpriteCache::new(device.clone(), atlas_size, fonts),
            quad_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "quad",
                "quad_vertex",
                "quad_fragment",
                pixel_format,
            )?,
            shadow_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "shadow",
                "shadow_vertex",
                "shadow_fragment",
                pixel_format,
            )?,
            sprite_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "sprite",
                "sprite_vertex",
                "sprite_fragment",
                pixel_format,
            )?,
            unit_vertices,
            instances,
            paths_texture,
        })
    }

    /// Records all draw calls for `scene` into `command_buffer`, targeting
    /// `output`. The target is cleared to opaque black, then each layer is
    /// drawn back-to-front: clip, shadows, quads, sprites.
    ///
    /// NOTE(review): `render_paths` exists below but is never invoked here —
    /// presumably work in progress.
    pub fn render(
        &mut self,
        scene: &Scene,
        drawable_size: Vector2F,
        device: &metal::DeviceRef,
        command_buffer: &metal::CommandBufferRef,
        output: &metal::TextureRef,
    ) {
        let render_pass_descriptor = metal::RenderPassDescriptor::new();
        let color_attachment = render_pass_descriptor
            .color_attachments()
            .object_at(0)
            .unwrap();
        color_attachment.set_texture(Some(output));
        color_attachment.set_load_action(metal::MTLLoadAction::Clear);
        color_attachment.set_store_action(metal::MTLStoreAction::Store);
        color_attachment.set_clear_color(metal::MTLClearColor::new(0., 0., 0., 1.));
        let command_encoder = command_buffer.new_render_command_encoder(render_pass_descriptor);

        command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: drawable_size.x() as f64,
            height: drawable_size.y() as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        let ctx = RenderContext {
            drawable_size,
            command_encoder,
        };
        // Cursor into the shared `instances` buffer; each render_* method
        // advances it past the bytes it wrote, so all layers in a frame share
        // one buffer.
        let mut offset = 0;
        for layer in scene.layers() {
            self.clip(scene, layer, &ctx);
            self.render_shadows(scene, layer, &mut offset, &ctx);
            self.render_quads(scene, layer, &mut offset, &ctx);
            self.render_sprites(scene, layer, &mut offset, &ctx);
        }

        command_encoder.end_encoding();
    }

    /// Applies the layer's clip bounds as a scissor rect, scaled from logical
    /// units to device pixels. Layers without explicit clip bounds get the
    /// whole drawable.
    fn clip(&mut self, scene: &Scene, layer: &Layer, ctx: &RenderContext) {
        let clip_bounds = layer.clip_bounds().unwrap_or(RectF::new(
            vec2f(0., 0.),
            ctx.drawable_size / scene.scale_factor(),
        )) * scene.scale_factor();
        ctx.command_encoder.set_scissor_rect(metal::MTLScissorRect {
            x: clip_bounds.origin_x() as NSUInteger,
            y: clip_bounds.origin_y() as NSUInteger,
            width: clip_bounds.width() as NSUInteger,
            height: clip_bounds.height() as NSUInteger,
        });
    }

    /// Draws the layer's drop shadows as one instanced draw: writes a
    /// `GPUIShadow` per shadow into the shared instance buffer at `*offset`
    /// and issues 6 vertices (two triangles) per instance.
    fn render_shadows(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.shadows().is_empty() {
            return;
        }

        // Buffer-binding offsets must be 256-byte aligned (see align_offset).
        align_offset(offset);
        let next_offset = *offset + layer.shadows().len() * mem::size_of::<shaders::GPUIShadow>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.shadow_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexShadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        // SAFETY of the writes below: the assert above guarantees that
        // `*offset + len * size_of::<GPUIShadow>()` stays within the
        // INSTANCE_BUFFER_SIZE-byte buffer.
        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIShadow
        };
        for (ix, shadow) in layer.shadows().iter().enumerate() {
            // Scale from logical coordinates to device pixels.
            let shape_bounds = shadow.bounds * scene.scale_factor();
            let shader_shadow = shaders::GPUIShadow {
                origin: shape_bounds.origin().to_float2(),
                size: shape_bounds.size().to_float2(),
                corner_radius: shadow.corner_radius * scene.scale_factor(),
                sigma: shadow.sigma,
                color: shadow.color.to_uchar4(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_shadow;
            }
        }

        // Managed-storage buffer: tell Metal which CPU-written range to sync
        // to the GPU before drawing.
        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.shadows().len() as u64,
        );
    }

    /// Draws the layer's quads as one instanced draw. Same buffer protocol as
    /// `render_shadows`: align, bounds-check, write instances, flush, draw.
    fn render_quads(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.quads().is_empty() {
            return;
        }
        align_offset(offset);
        let next_offset = *offset + layer.quads().len() * mem::size_of::<shaders::GPUIQuad>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.quad_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexQuads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        // SAFETY: in-bounds by the assert above, same as in render_shadows.
        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIQuad
        };
        for (ix, quad) in layer.quads().iter().enumerate() {
            let bounds = quad.bounds * scene.scale_factor();
            let border_width = quad.border.width * scene.scale_factor();
            let shader_quad = shaders::GPUIQuad {
                origin: bounds.origin().to_float2(),
                size: bounds.size().to_float2(),
                background_color: quad
                    .background
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                // `quad.border.{top,right,bottom,left}` are booleans; the
                // `as usize as f32` dance turns each into 0.0 or 1.0, so a
                // side's width is either 0 or `border_width`.
                border_top: border_width * (quad.border.top as usize as f32),
                border_right: border_width * (quad.border.right as usize as f32),
                border_bottom: border_width * (quad.border.bottom as usize as f32),
                border_left: border_width * (quad.border.left as usize as f32),
                border_color: quad
                    .border
                    .color
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                corner_radius: quad.corner_radius * scene.scale_factor(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_quad;
            }
        }

        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.quads().len() as u64,
        );
    }

    /// Stub: path rendering is not implemented yet. Parameters are unused and
    /// `self.paths_texture` is never written. Not called from `render`.
    fn render_paths(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
    }

    /// Draws the layer's glyphs. Glyphs are rasterized (or fetched) via the
    /// sprite cache, grouped by atlas texture, and each group is drawn with
    /// one instanced draw from its own aligned slice of the instance buffer.
    fn render_sprites(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.glyphs().is_empty() {
            return;
        }

        // Group sprites by atlas so each atlas texture is bound only once.
        let mut sprites_by_atlas = HashMap::new();
        for glyph in layer.glyphs() {
            // `render_glyph` may return None (e.g. nothing to draw for this
            // glyph) — such glyphs are simply skipped.
            if let Some(sprite) = self.sprite_cache.render_glyph(
                glyph.font_id,
                glyph.font_size,
                glyph.id,
                glyph.origin,
                scene.scale_factor(),
            ) {
                // Snap sprite to pixel grid.
                let origin = (glyph.origin * scene.scale_factor()).floor() + sprite.offset.to_f32();
                sprites_by_atlas
                    .entry(sprite.atlas_id)
                    .or_insert_with(Vec::new)
                    .push(shaders::GPUISprite {
                        origin: origin.to_float2(),
                        size: sprite.size.to_float2(),
                        atlas_origin: sprite.atlas_origin.to_float2(),
                        color: glyph.color.to_uchar4(),
                    });
            }
        }

        // State shared by all atlas groups: pipeline, unit square, viewport
        // and atlas sizes.
        ctx.command_encoder
            .set_render_pipeline_state(&self.sprite_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexViewportSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [ctx.drawable_size.to_float2()].as_ptr() as *const c_void,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexAtlasSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [self.sprite_cache.atlas_size().to_float2()].as_ptr() as *const c_void,
        );

        for (atlas_id, sprites) in sprites_by_atlas {
            align_offset(offset);
            let next_offset = *offset + sprites.len() * mem::size_of::<shaders::GPUISprite>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            ctx.command_encoder.set_vertex_buffer(
                shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexSprites as u64,
                Some(&self.instances),
                *offset as u64,
            );

            let texture = self.sprite_cache.atlas_texture(atlas_id).unwrap();
            ctx.command_encoder.set_fragment_texture(
                shaders::GPUISpriteFragmentInputIndex_GPUISpriteFragmentInputIndexAtlas as u64,
                Some(texture),
            );

            // SAFETY: the assert above guarantees the copied range fits
            // within the instance buffer; source and destination cannot
            // overlap (Vec allocation vs. Metal buffer contents).
            unsafe {
                let buffer_contents = (self.instances.contents() as *mut u8)
                    .offset(*offset as isize)
                    as *mut shaders::GPUISprite;
                std::ptr::copy_nonoverlapping(sprites.as_ptr(), buffer_contents, sprites.len());
            }
            self.instances.did_modify_range(NSRange {
                location: *offset as u64,
                length: (next_offset - *offset) as u64,
            });
            *offset = next_offset;

            ctx.command_encoder.draw_primitives_instanced(
                metal::MTLPrimitiveType::Triangle,
                0,
                6,
                sprites.len() as u64,
            );
        }
    }
}
416
/// Rounds `offset` up to the next multiple of 256 (buffer-binding offsets
/// passed to Metal need this alignment). Already-aligned offsets are left
/// unchanged.
fn align_offset(offset: &mut usize) {
    // Branchless integer round-up: equivalent to adding `256 - offset % 256`
    // when misaligned and doing nothing when already aligned.
    *offset = (*offset + 255) / 256 * 256;
}
423
424fn build_pipeline_state(
425 device: &metal::DeviceRef,
426 library: &metal::LibraryRef,
427 label: &str,
428 vertex_fn_name: &str,
429 fragment_fn_name: &str,
430 pixel_format: metal::MTLPixelFormat,
431) -> Result<metal::RenderPipelineState> {
432 let vertex_fn = library
433 .get_function(vertex_fn_name, None)
434 .map_err(|message| anyhow!("error locating vertex function: {}", message))?;
435 let fragment_fn = library
436 .get_function(fragment_fn_name, None)
437 .map_err(|message| anyhow!("error locating fragment function: {}", message))?;
438
439 let descriptor = metal::RenderPipelineDescriptor::new();
440 descriptor.set_label(label);
441 descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
442 descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
443 let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
444 color_attachment.set_pixel_format(pixel_format);
445 color_attachment.set_blending_enabled(true);
446 color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
447 color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
448 color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
449 color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::SourceAlpha);
450 color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
451 color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
452
453 device
454 .new_render_pipeline_state(&descriptor)
455 .map_err(|message| anyhow!("could not create render pipeline state: {}", message))
456}
457
458mod shaders {
459 #![allow(non_upper_case_globals)]
460 #![allow(non_camel_case_types)]
461 #![allow(non_snake_case)]
462
463 use pathfinder_geometry::vector::Vector2I;
464
465 use crate::{color::ColorU, geometry::vector::Vector2F};
466 use std::mem;
467
468 include!(concat!(env!("OUT_DIR"), "/shaders.rs"));
469
470 pub trait ToFloat2 {
471 fn to_float2(&self) -> vector_float2;
472 }
473
474 pub trait ToUchar4 {
475 fn to_uchar4(&self) -> vector_uchar4;
476 }
477
478 impl ToFloat2 for (f32, f32) {
479 fn to_float2(&self) -> vector_float2 {
480 unsafe {
481 let mut output = mem::transmute::<_, u32>(self.1.to_bits()) as vector_float2;
482 output <<= 32;
483 output |= mem::transmute::<_, u32>(self.0.to_bits()) as vector_float2;
484 output
485 }
486 }
487 }
488
489 impl ToFloat2 for Vector2F {
490 fn to_float2(&self) -> vector_float2 {
491 unsafe {
492 let mut output = mem::transmute::<_, u32>(self.y().to_bits()) as vector_float2;
493 output <<= 32;
494 output |= mem::transmute::<_, u32>(self.x().to_bits()) as vector_float2;
495 output
496 }
497 }
498 }
499
500 impl ToFloat2 for Vector2I {
501 fn to_float2(&self) -> vector_float2 {
502 self.to_f32().to_float2()
503 }
504 }
505
506 impl ToUchar4 for ColorU {
507 fn to_uchar4(&self) -> vector_uchar4 {
508 let mut vec = self.a as vector_uchar4;
509 vec <<= 8;
510 vec |= self.b as vector_uchar4;
511 vec <<= 8;
512 vec |= self.g as vector_uchar4;
513 vec <<= 8;
514 vec |= self.r as vector_uchar4;
515 vec
516 }
517 }
518}