1use super::{sprite_cache::SpriteCache, window::RenderContext};
2use crate::{
3 color::ColorU,
4 geometry::vector::{vec2i, Vector2I},
5 platform,
6 scene::Layer,
7 Scene,
8};
9use anyhow::{anyhow, Result};
10use metal::{MTLResourceOptions, NSRange};
11use shaders::{ToFloat2 as _, ToUchar4 as _};
12use std::{collections::HashMap, ffi::c_void, mem, sync::Arc};
13
14const SHADERS_METALLIB: &'static [u8] =
15 include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
16const INSTANCE_BUFFER_SIZE: usize = 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value.
17
/// Metal-backed renderer for a `Scene`: draws drop shadows, quads, and glyph
/// sprites with instanced draw calls.
pub struct Renderer {
    // Rasterized-glyph cache backed by texture atlases.
    sprite_cache: SpriteCache,
    quad_pipeline_state: metal::RenderPipelineState,
    shadow_pipeline_state: metal::RenderPipelineState,
    sprite_pipeline_state: metal::RenderPipelineState,
    // Six vertices forming two triangles that cover the unit square; instance
    // shaders position and scale them per primitive.
    unit_vertices: metal::Buffer,
    // Shared staging buffer for per-instance shader data, reused across draws
    // within a frame via a running byte offset.
    instances: metal::Buffer,
}
26
27impl Renderer {
28 pub fn new(
29 device: metal::Device,
30 pixel_format: metal::MTLPixelFormat,
31 fonts: Arc<dyn platform::FontSystem>,
32 ) -> Result<Self> {
33 let library = device
34 .new_library_with_data(SHADERS_METALLIB)
35 .map_err(|message| anyhow!("error building metal library: {}", message))?;
36
37 let unit_vertices = [
38 (0., 0.).to_float2(),
39 (1., 0.).to_float2(),
40 (0., 1.).to_float2(),
41 (0., 1.).to_float2(),
42 (1., 0.).to_float2(),
43 (1., 1.).to_float2(),
44 ];
45 let unit_vertices = device.new_buffer_with_data(
46 unit_vertices.as_ptr() as *const c_void,
47 (unit_vertices.len() * mem::size_of::<shaders::vector_float2>()) as u64,
48 MTLResourceOptions::StorageModeManaged,
49 );
50 let instances = device.new_buffer(
51 INSTANCE_BUFFER_SIZE as u64,
52 MTLResourceOptions::StorageModeManaged,
53 );
54
55 let atlas_size: Vector2I = vec2i(1024, 768);
56 Ok(Self {
57 sprite_cache: SpriteCache::new(device.clone(), atlas_size, fonts),
58 quad_pipeline_state: build_pipeline_state(
59 &device,
60 &library,
61 "quad",
62 "quad_vertex",
63 "quad_fragment",
64 pixel_format,
65 )?,
66 shadow_pipeline_state: build_pipeline_state(
67 &device,
68 &library,
69 "shadow",
70 "shadow_vertex",
71 "shadow_fragment",
72 pixel_format,
73 )?,
74 sprite_pipeline_state: build_pipeline_state(
75 &device,
76 &library,
77 "sprite",
78 "sprite_vertex",
79 "sprite_fragment",
80 pixel_format,
81 )?,
82 unit_vertices,
83 instances,
84 })
85 }
86
87 pub fn render(&mut self, scene: &Scene, ctx: &RenderContext) {
88 ctx.command_encoder.set_viewport(metal::MTLViewport {
89 originX: 0.0,
90 originY: 0.0,
91 width: ctx.drawable_size.x() as f64,
92 height: ctx.drawable_size.y() as f64,
93 znear: 0.0,
94 zfar: 1.0,
95 });
96
97 let mut offset = 0;
98 for layer in scene.layers() {
99 self.render_shadows(scene, layer, &mut offset, ctx);
100 self.render_quads(scene, layer, &mut offset, ctx);
101 self.render_sprites(scene, layer, &mut offset, ctx);
102 }
103 }
104
    /// Encodes one instanced draw covering all drop shadows in `layer`.
    ///
    /// Instance data is written into `self.instances` starting at `*offset`
    /// (first aligned to 256 bytes); `*offset` is advanced past the written
    /// range. Panics if the shadows do not fit in the remaining buffer.
    fn render_shadows(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.shadows().is_empty() {
            return;
        }

        // Buffer offsets bound to the encoder must be 256-byte aligned.
        align_offset(offset);
        let next_offset = *offset + layer.shadows().len() * mem::size_of::<shaders::GPUIShadow>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.shadow_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexShadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        // Write one GPUIShadow per shadow directly into the shared instance
        // buffer, scaling geometry by the scene's scale factor.
        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIShadow
        };
        for (ix, shadow) in layer.shadows().iter().enumerate() {
            let shape_bounds = shadow.bounds * scene.scale_factor();
            let shader_shadow = shaders::GPUIShadow {
                origin: shape_bounds.origin().to_float2(),
                size: shape_bounds.size().to_float2(),
                corner_radius: shadow.corner_radius * scene.scale_factor(),
                sigma: shadow.sigma,
                color: shadow.color.to_uchar4(),
            };
            // SAFETY: `next_offset <= INSTANCE_BUFFER_SIZE` was asserted
            // above, so slot `ix` lies within the instance buffer.
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_shadow;
            }
        }

        // The buffer uses managed storage, so flush the CPU writes to the GPU.
        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        // 6 vertices per instance: the two-triangle unit square.
        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.shadows().len() as u64,
        );
    }
175
    /// Encodes one instanced draw covering all quads in `layer`.
    ///
    /// Instance data is written into `self.instances` starting at `*offset`
    /// (first aligned to 256 bytes); `*offset` is advanced past the written
    /// range. Panics if the quads do not fit in the remaining buffer.
    fn render_quads(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.quads().is_empty() {
            return;
        }
        // Buffer offsets bound to the encoder must be 256-byte aligned.
        align_offset(offset);
        let next_offset = *offset + layer.quads().len() * mem::size_of::<shaders::GPUIQuad>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.quad_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexQuads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        // Write one GPUIQuad per quad directly into the shared instance
        // buffer, scaling geometry by the scene's scale factor.
        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIQuad
        };
        for (ix, quad) in layer.quads().iter().enumerate() {
            let bounds = quad.bounds * scene.scale_factor();
            let border_width = quad.border.width * scene.scale_factor();
            let shader_quad = shaders::GPUIQuad {
                origin: bounds.origin().to_float2(),
                size: bounds.size().to_float2(),
                background_color: quad
                    .background
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                // Each `bool as usize as f32` yields 0.0 or 1.0, enabling the
                // border width on that side only when its flag is set.
                border_top: border_width * (quad.border.top as usize as f32),
                border_right: border_width * (quad.border.right as usize as f32),
                border_bottom: border_width * (quad.border.bottom as usize as f32),
                border_left: border_width * (quad.border.left as usize as f32),
                border_color: quad
                    .border
                    .color
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                corner_radius: quad.corner_radius * scene.scale_factor(),
            };
            // SAFETY: `next_offset <= INSTANCE_BUFFER_SIZE` was asserted
            // above, so slot `ix` lies within the instance buffer.
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_quad;
            }
        }

        // The buffer uses managed storage, so flush the CPU writes to the GPU.
        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        // 6 vertices per instance: the two-triangle unit square.
        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.quads().len() as u64,
        );
    }
257
    /// Encodes instanced draws for all glyph sprites in `layer`, one draw per
    /// texture atlas.
    ///
    /// Glyphs are resolved through the sprite cache and grouped by the atlas
    /// they live in; glyphs for which the cache returns no sprite are skipped.
    /// Each group's instance data is written into `self.instances` (256-byte
    /// aligned) before its draw call, advancing `*offset` as it goes. Panics
    /// if a group does not fit in the remaining buffer.
    fn render_sprites(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.glyphs().is_empty() {
            return;
        }

        // Group sprites by atlas so each atlas texture is bound exactly once.
        let mut sprites_by_atlas = HashMap::new();
        for glyph in layer.glyphs() {
            if let Some(sprite) = self.sprite_cache.render_glyph(
                glyph.font_id,
                glyph.font_size,
                glyph.id,
                glyph.origin,
                scene.scale_factor(),
            ) {
                sprites_by_atlas
                    .entry(sprite.atlas_id)
                    .or_insert_with(Vec::new)
                    .push(shaders::GPUISprite {
                        origin: (glyph.origin * scene.scale_factor() + sprite.offset).to_float2(),
                        size: sprite.size.to_float2(),
                        atlas_origin: sprite.atlas_origin.to_float2(),
                        color: glyph.color.to_uchar4(),
                    });
            }
        }

        // Pipeline state, unit vertices, and uniform-style bytes are shared by
        // every per-atlas draw below.
        ctx.command_encoder
            .set_render_pipeline_state(&self.sprite_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexViewportSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [ctx.drawable_size.to_float2()].as_ptr() as *const c_void,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexAtlasSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [self.sprite_cache.atlas_size().to_float2()].as_ptr() as *const c_void,
        );

        for (atlas_id, sprites) in sprites_by_atlas {
            // Buffer offsets bound to the encoder must be 256-byte aligned.
            align_offset(offset);
            let next_offset = *offset + sprites.len() * mem::size_of::<shaders::GPUISprite>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            ctx.command_encoder.set_vertex_buffer(
                shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexSprites as u64,
                Some(&self.instances),
                *offset as u64,
            );

            let texture = self.sprite_cache.atlas_texture(atlas_id).unwrap();
            ctx.command_encoder.set_fragment_texture(
                shaders::GPUISpriteFragmentInputIndex_GPUISpriteFragmentInputIndexAtlas as u64,
                Some(texture),
            );

            // SAFETY: `next_offset <= INSTANCE_BUFFER_SIZE` was asserted
            // above, so the copied range lies within the instance buffer.
            unsafe {
                let buffer_contents = (self.instances.contents() as *mut u8)
                    .offset(*offset as isize)
                    as *mut shaders::GPUISprite;
                std::ptr::copy_nonoverlapping(sprites.as_ptr(), buffer_contents, sprites.len());
            }
            // The buffer uses managed storage, so flush the CPU writes.
            self.instances.did_modify_range(NSRange {
                location: *offset as u64,
                length: (next_offset - *offset) as u64,
            });
            *offset = next_offset;

            // 6 vertices per instance: the two-triangle unit square.
            ctx.command_encoder.draw_primitives_instanced(
                metal::MTLPrimitiveType::Triangle,
                0,
                6,
                sprites.len() as u64,
            );
        }
    }
348}
349
/// Rounds `offset` up to the next multiple of 256, the buffer-offset
/// alignment Metal expects; offsets already aligned are left untouched.
fn align_offset(offset: &mut usize) {
    let misalignment = *offset & 255; // equivalent to `% 256`
    if misalignment != 0 {
        *offset += 256 - misalignment;
    }
}
356
357fn build_pipeline_state(
358 device: &metal::DeviceRef,
359 library: &metal::LibraryRef,
360 label: &str,
361 vertex_fn_name: &str,
362 fragment_fn_name: &str,
363 pixel_format: metal::MTLPixelFormat,
364) -> Result<metal::RenderPipelineState> {
365 let vertex_fn = library
366 .get_function(vertex_fn_name, None)
367 .map_err(|message| anyhow!("error locating vertex function: {}", message))?;
368 let fragment_fn = library
369 .get_function(fragment_fn_name, None)
370 .map_err(|message| anyhow!("error locating fragment function: {}", message))?;
371
372 let descriptor = metal::RenderPipelineDescriptor::new();
373 descriptor.set_label(label);
374 descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
375 descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
376 let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
377 color_attachment.set_pixel_format(pixel_format);
378 color_attachment.set_blending_enabled(true);
379 color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
380 color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
381 color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
382 color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::SourceAlpha);
383 color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
384 color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
385
386 device
387 .new_render_pipeline_state(&descriptor)
388 .map_err(|message| anyhow!("could not create render pipeline state: {}", message))
389}
390
391mod shaders {
392 #![allow(non_upper_case_globals)]
393 #![allow(non_camel_case_types)]
394 #![allow(non_snake_case)]
395
396 use pathfinder_geometry::vector::Vector2I;
397
398 use crate::{color::ColorU, geometry::vector::Vector2F};
399 use std::mem;
400
401 include!(concat!(env!("OUT_DIR"), "/shaders.rs"));
402
403 pub trait ToFloat2 {
404 fn to_float2(&self) -> vector_float2;
405 }
406
407 pub trait ToUchar4 {
408 fn to_uchar4(&self) -> vector_uchar4;
409 }
410
411 impl ToFloat2 for (f32, f32) {
412 fn to_float2(&self) -> vector_float2 {
413 unsafe {
414 let mut output = mem::transmute::<_, u32>(self.1.to_bits()) as vector_float2;
415 output <<= 32;
416 output |= mem::transmute::<_, u32>(self.0.to_bits()) as vector_float2;
417 output
418 }
419 }
420 }
421
422 impl ToFloat2 for Vector2F {
423 fn to_float2(&self) -> vector_float2 {
424 unsafe {
425 let mut output = mem::transmute::<_, u32>(self.y().to_bits()) as vector_float2;
426 output <<= 32;
427 output |= mem::transmute::<_, u32>(self.x().to_bits()) as vector_float2;
428 output
429 }
430 }
431 }
432
433 impl ToFloat2 for Vector2I {
434 fn to_float2(&self) -> vector_float2 {
435 self.to_f32().to_float2()
436 }
437 }
438
439 impl ToUchar4 for ColorU {
440 fn to_uchar4(&self) -> vector_uchar4 {
441 let mut vec = self.a as vector_uchar4;
442 vec <<= 8;
443 vec |= self.b as vector_uchar4;
444 vec <<= 8;
445 vec |= self.g as vector_uchar4;
446 vec <<= 8;
447 vec |= self.r as vector_uchar4;
448 vec
449 }
450 }
451}