use super::{sprite_cache::SpriteCache, window::RenderContext};
use crate::{
    color::ColorU,
    geometry::vector::{vec2i, Vector2I},
    scene::Layer,
    FontCache, Scene,
};
use anyhow::{anyhow, Result};
use metal::{MTLResourceOptions, NSRange};
use shaders::{ToFloat2 as _, ToUchar4 as _};
use std::{collections::HashMap, ffi::c_void, mem, sync::Arc};

const SHADERS_METALLIB: &[u8] =
    include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
const INSTANCE_BUFFER_SIZE: usize = 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value.

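/// Renders a [`Scene`] with Metal: one pipeline per primitive type (quads,
/// shadows, glyph sprites), a shared unit-quad vertex buffer, and a single
/// instance buffer that all draw calls suballocate from.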
pub struct Renderer {
    sprite_cache: SpriteCache,
    quad_pipeline_state: metal::RenderPipelineState,
    shadow_pipeline_state: metal::RenderPipelineState,
    sprite_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    instances: metal::Buffer,
}

impl Renderer {
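    /// Compiles the embedded metallib, uploads the unit-quad vertices, and
    /// builds one render pipeline state per primitive type.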
    pub fn new(
        device: metal::Device,
        pixel_format: metal::MTLPixelFormat,
        font_cache: Arc<FontCache>,
    ) -> Result<Self> {
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .map_err(|message| anyhow!("error building metal library: {}", message))?;

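        // A unit quad as two triangles; each instance scales and translates it.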
        let unit_vertices = [
            (0., 0.).to_float2(),
            (1., 0.).to_float2(),
            (0., 1.).to_float2(),
            (0., 1.).to_float2(),
            (1., 0.).to_float2(),
            (1., 1.).to_float2(),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<shaders::vector_float2>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let atlas_size: Vector2I = vec2i(1024, 768);
        Ok(Self {
            sprite_cache: SpriteCache::new(device.clone(), atlas_size, font_cache),
            quad_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "quad",
                "quad_vertex",
                "quad_fragment",
                pixel_format,
            )?,
            shadow_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "shadow",
                "shadow_vertex",
                "shadow_fragment",
                pixel_format,
            )?,
            sprite_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "sprite",
                "sprite_vertex",
                "sprite_fragment",
                pixel_format,
            )?,
            unit_vertices,
            instances,
        })
    }

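    /// Draws the scene layer by layer. `offset` tracks how much of the shared
    /// instance buffer has been consumed so far this frame.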
    pub fn render(&mut self, scene: &Scene, ctx: &RenderContext) {
        ctx.command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: ctx.drawable_size.x() as f64,
            height: ctx.drawable_size.y() as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        let mut offset = 0;
        for layer in scene.layers() {
            self.render_shadows(scene, layer, &mut offset, ctx);
            self.render_quads(scene, layer, &mut offset, ctx);
            self.render_sprites(scene, layer, &mut offset, ctx);
        }
    }

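    /// Writes one `GPUIShadow` instance per shadow into the instance buffer,
    /// then draws them all with a single instanced call.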
    fn render_shadows(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.shadows().is_empty() {
            return;
        }

        align_offset(offset);
        let next_offset = *offset + layer.shadows().len() * mem::size_of::<shaders::GPUIShadow>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.shadow_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexShadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIShadow
        };
        for (ix, shadow) in layer.shadows().iter().enumerate() {
            let shape_bounds = shadow.bounds * scene.scale_factor();
            let shader_shadow = shaders::GPUIShadow {
                origin: shape_bounds.origin().to_float2(),
                size: shape_bounds.size().to_float2(),
                corner_radius: shadow.corner_radius * scene.scale_factor(),
                sigma: shadow.sigma,
                color: shadow.color.to_uchar4(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_shadow;
            }
        }

        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.shadows().len() as u64,
        );
    }

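    /// Writes one `GPUIQuad` instance per quad, with bounds, borders, and
    /// corner radius pre-scaled by the scene's scale factor, and draws them
    /// all with a single instanced call.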
    fn render_quads(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.quads().is_empty() {
            return;
        }
        align_offset(offset);
        let next_offset = *offset + layer.quads().len() * mem::size_of::<shaders::GPUIQuad>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.quad_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexQuads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIQuad
        };
        for (ix, quad) in layer.quads().iter().enumerate() {
            let bounds = quad.bounds * scene.scale_factor();
            let border_width = quad.border.width * scene.scale_factor();
            let shader_quad = shaders::GPUIQuad {
                origin: bounds.origin().to_float2(),
                size: bounds.size().to_float2(),
                background_color: quad
                    .background
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                // Each border flag casts to 0 or 1, zeroing the width on
                // sides that have no border.
                border_top: border_width * (quad.border.top as usize as f32),
                border_right: border_width * (quad.border.right as usize as f32),
                border_bottom: border_width * (quad.border.bottom as usize as f32),
                border_left: border_width * (quad.border.left as usize as f32),
                border_color: quad
                    .border
                    .color
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                corner_radius: quad.corner_radius * scene.scale_factor(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_quad;
            }
        }

        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.quads().len() as u64,
        );
    }

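    /// Rasterizes any missing glyphs into the sprite cache, groups the
    /// resulting sprites by atlas texture, then issues one instanced draw per
    /// atlas.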
    fn render_sprites(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.glyphs().is_empty() {
            return;
        }

        let mut sprites_by_atlas = HashMap::new();
        for glyph in layer.glyphs() {
            if let Some((atlas, bounds)) = self.sprite_cache.render_glyph(
                glyph.font_id,
                glyph.font_size,
                glyph.id,
                scene.scale_factor(),
            ) {
                sprites_by_atlas
                    .entry(atlas)
                    .or_insert_with(Vec::new)
                    .push(shaders::GPUISprite {
                        origin: (glyph.origin * scene.scale_factor()).to_float2(),
                        size: bounds.size().to_float2(),
                        atlas_origin: bounds.origin().to_float2(),
                        color: glyph.color.to_uchar4(),
                    });
            }
        }

        ctx.command_encoder
            .set_render_pipeline_state(&self.sprite_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexViewportSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [ctx.drawable_size.to_float2()].as_ptr() as *const c_void,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexAtlasSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [self.sprite_cache.atlas_size().to_float2()].as_ptr() as *const c_void,
        );

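        // One instanced draw per atlas texture, each reading from its own
        // slice of the shared instance buffer.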
        for (atlas_id, sprites) in sprites_by_atlas {
            align_offset(offset);
            let next_offset = *offset + sprites.len() * mem::size_of::<shaders::GPUISprite>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            ctx.command_encoder.set_vertex_buffer(
                shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexSprites as u64,
                Some(&self.instances),
                *offset as u64,
            );

            let texture = self.sprite_cache.atlas_texture(atlas_id).unwrap();
            ctx.command_encoder.set_fragment_texture(
                shaders::GPUISpriteFragmentInputIndex_GPUISpriteFragmentInputIndexAtlas as u64,
                Some(texture),
            );

            unsafe {
                let buffer_contents = (self.instances.contents() as *mut u8)
                    .offset(*offset as isize)
                    as *mut shaders::GPUISprite;
                std::ptr::copy_nonoverlapping(sprites.as_ptr(), buffer_contents, sprites.len());
            }
            self.instances.did_modify_range(NSRange {
                location: *offset as u64,
                length: (next_offset - *offset) as u64,
            });
            *offset = next_offset;

            ctx.command_encoder.draw_primitives_instanced(
                metal::MTLPrimitiveType::Triangle,
                0,
                6,
                sprites.len() as u64,
            );
        }
    }
}

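/// Rounds `offset` up to the next multiple of 256, the alignment Metal
/// requires for buffer offsets bound via `set_vertex_buffer` on macOS.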
fn align_offset(offset: &mut usize) {
    let r = *offset % 256;
    if r > 0 {
        *offset += 256 - r; // Align to a multiple of 256 to make Metal happy
    }
}

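/// Builds a render pipeline state for the given vertex/fragment function pair,
/// with source-over alpha blending enabled on the color attachment.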
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> Result<metal::RenderPipelineState> {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .map_err(|message| anyhow!("error locating vertex function: {}", message))?;
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .map_err(|message| anyhow!("error locating fragment function: {}", message))?;

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);

    device
        .new_render_pipeline_state(&descriptor)
        .map_err(|message| anyhow!("could not create render pipeline state: {}", message))
}

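/// Bindings shared with the Metal shaders, generated into `OUT_DIR` at build
/// time, plus conversions into the generated simd types. Those types are
/// represented as plain integers on the Rust side, hence the manual
/// bit-packing below.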
mod shaders {
    #![allow(non_upper_case_globals)]
    #![allow(non_camel_case_types)]
    #![allow(non_snake_case)]

    use crate::{
        color::ColorU,
        geometry::vector::{Vector2F, Vector2I},
    };

    include!(concat!(env!("OUT_DIR"), "/shaders.rs"));

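    /// Conversion into the shader-side `vector_float2` type.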
    pub trait ToFloat2 {
        fn to_float2(&self) -> vector_float2;
    }

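    /// Conversion into the shader-side `vector_uchar4` (RGBA) type.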
    pub trait ToUchar4 {
        fn to_uchar4(&self) -> vector_uchar4;
    }

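    // `vector_float2` packs two f32s: x in the low 32 bits, y in the high 32
    // bits. `f32::to_bits` already yields the raw bits, so no `transmute` (or
    // `unsafe`) is needed.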
    impl ToFloat2 for (f32, f32) {
        fn to_float2(&self) -> vector_float2 {
            let mut output = self.1.to_bits() as vector_float2;
            output <<= 32;
            output |= self.0.to_bits() as vector_float2;
            output
        }
    }

    impl ToFloat2 for Vector2F {
        fn to_float2(&self) -> vector_float2 {
            let mut output = self.y().to_bits() as vector_float2;
            output <<= 32;
            output |= self.x().to_bits() as vector_float2;
            output
        }
    }

    impl ToFloat2 for Vector2I {
        fn to_float2(&self) -> vector_float2 {
            self.to_f32().to_float2()
        }
    }

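    // Pack the four color channels into `vector_uchar4`, with `r` in the
    // lowest byte and `a` in the highest.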
    impl ToUchar4 for ColorU {
        fn to_uchar4(&self) -> vector_uchar4 {
            let mut vec = self.a as vector_uchar4;
            vec <<= 8;
            vec |= self.b as vector_uchar4;
            vec <<= 8;
            vec |= self.g as vector_uchar4;
            vec <<= 8;
            vec |= self.r as vector_uchar4;
            vec
        }
    }
}