use super::{sprite_cache::SpriteCache, window::RenderContext};
use crate::{
    color::ColorU,
    geometry::vector::{vec2i, Vector2I},
    scene::Layer,
    FontCache, Scene,
};
use anyhow::{anyhow, Result};
use metal::{MTLResourceOptions, NSRange};
use shaders::{ToFloat2 as _, ToUchar4 as _};
use std::{collections::HashMap, ffi::c_void, mem, sync::Arc};

const SHADERS_METALLIB: &[u8] =
    include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
const INSTANCE_BUFFER_SIZE: usize = 1024 * 1024; // Arbitrarily chosen; a better-tuned size likely exists.

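/// Metal-based renderer holding one pipeline state per primitive kind (quads,
/// shadows, glyph sprites), a shared unit-quad vertex buffer, and a single
/// instance buffer from which each draw call sub-allocates.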
pub struct Renderer {
    sprite_cache: SpriteCache,
    quad_pipeline_state: metal::RenderPipelineState,
    shadow_pipeline_state: metal::RenderPipelineState,
    sprite_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    instances: metal::Buffer,
}

impl Renderer {
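    /// Creates a renderer for `device`: compiles the embedded metallib,
    /// builds the three render pipelines, uploads a unit quad (two triangles
    /// covering [0, 1] x [0, 1]), and allocates the shared instance buffer.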
    pub fn new(
        device: metal::Device,
        pixel_format: metal::MTLPixelFormat,
        font_cache: Arc<FontCache>,
    ) -> Result<Self> {
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .map_err(|message| anyhow!("error building metal library: {}", message))?;

        let unit_vertices = [
            (0., 0.).to_float2(),
            (1., 0.).to_float2(),
            (0., 1.).to_float2(),
            (0., 1.).to_float2(),
            (1., 0.).to_float2(),
            (1., 1.).to_float2(),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<shaders::vector_float2>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let atlas_size: Vector2I = vec2i(1024, 768);
        Ok(Self {
            sprite_cache: SpriteCache::new(device.clone(), atlas_size, font_cache),
            quad_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "quad",
                "quad_vertex",
                "quad_fragment",
                pixel_format,
            )?,
            shadow_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "shadow",
                "shadow_vertex",
                "shadow_fragment",
                pixel_format,
            )?,
            sprite_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "sprite",
                "sprite_vertex",
                "sprite_fragment",
                pixel_format,
            )?,
            unit_vertices,
            instances,
        })
    }

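    /// Draws `scene` into the current frame. Layers render in order, and
    /// within each layer shadows draw first, then quads, then glyph sprites,
    /// so later primitives composite over earlier ones. `offset` tracks the
    /// running write position within the shared instance buffer.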
    pub fn render(&mut self, scene: &Scene, ctx: &RenderContext) {
        ctx.command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: ctx.drawable_size.x() as f64,
            height: ctx.drawable_size.y() as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        let mut offset = 0;
        for layer in scene.layers() {
            self.render_shadows(scene, layer, &mut offset, ctx);
            self.render_quads(scene, layer, &mut offset, ctx);
            self.render_sprites(scene, layer, &mut offset, ctx);
        }
    }

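    /// Copies this layer's shadows into the instance buffer as `GPUIShadow`
    /// values (scaled to device pixels) and draws them all with a single
    /// instanced draw of the unit quad.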
    fn render_shadows(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.shadows().is_empty() {
            return;
        }

        align_offset(offset);
        let next_offset = *offset + layer.shadows().len() * mem::size_of::<shaders::GPUIShadow>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.shadow_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexShadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIShadow
        };
        for (ix, shadow) in layer.shadows().iter().enumerate() {
            let shape_bounds = shadow.bounds * scene.scale_factor();
            let shader_shadow = shaders::GPUIShadow {
                origin: shape_bounds.origin().to_float2(),
                size: shape_bounds.size().to_float2(),
                corner_radius: shadow.corner_radius * scene.scale_factor(),
                sigma: shadow.sigma,
                color: shadow.color.to_uchar4(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_shadow;
            }
        }

        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.shadows().len() as u64,
        );
    }

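    /// Copies this layer's quads into the instance buffer as `GPUIQuad`
    /// values. Each side's border width is multiplied by 0 or 1 (via the
    /// `as usize as f32` cast) so that disabled sides collapse to zero.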
    fn render_quads(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.quads().is_empty() {
            return;
        }
        align_offset(offset);
        let next_offset = *offset + layer.quads().len() * mem::size_of::<shaders::GPUIQuad>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.quad_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexQuads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIQuad
        };
        for (ix, quad) in layer.quads().iter().enumerate() {
            let bounds = quad.bounds * scene.scale_factor();
            let border_width = quad.border.width * scene.scale_factor();
            let shader_quad = shaders::GPUIQuad {
                origin: bounds.origin().to_float2(),
                size: bounds.size().to_float2(),
                background_color: quad
                    .background
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                border_top: border_width * (quad.border.top as usize as f32),
                border_right: border_width * (quad.border.right as usize as f32),
                border_bottom: border_width * (quad.border.bottom as usize as f32),
                border_left: border_width * (quad.border.left as usize as f32),
                border_color: quad
                    .border
                    .color
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                corner_radius: quad.corner_radius * scene.scale_factor(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_quad;
            }
        }

        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.quads().len() as u64,
        );
    }

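    /// Rasterizes this layer's glyphs through the sprite cache, groups the
    /// resulting sprites by atlas texture, and issues one instanced draw per
    /// atlas, binding that atlas as the fragment texture.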
    fn render_sprites(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.glyphs().is_empty() {
            return;
        }

        let mut sprites_by_atlas = HashMap::new();
        for glyph in layer.glyphs() {
            if let Some(sprite) = self.sprite_cache.render_glyph(
                glyph.font_id,
                glyph.font_size,
                glyph.id,
                scene.scale_factor(),
            ) {
                sprites_by_atlas
                    .entry(sprite.atlas_id)
                    .or_insert_with(Vec::new)
                    .push(shaders::GPUISprite {
                        origin: (glyph.origin * scene.scale_factor() + sprite.offset.to_f32())
                            .to_float2(),
                        size: sprite.size.to_float2(),
                        atlas_origin: sprite.atlas_origin.to_float2(),
                        color: glyph.color.to_uchar4(),
                    });
            }
        }

        ctx.command_encoder
            .set_render_pipeline_state(&self.sprite_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexViewportSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [ctx.drawable_size.to_float2()].as_ptr() as *const c_void,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexAtlasSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [self.sprite_cache.atlas_size().to_float2()].as_ptr() as *const c_void,
        );

        for (atlas_id, sprites) in sprites_by_atlas {
            align_offset(offset);
            let next_offset = *offset + sprites.len() * mem::size_of::<shaders::GPUISprite>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            ctx.command_encoder.set_vertex_buffer(
                shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexSprites as u64,
                Some(&self.instances),
                *offset as u64,
            );

            let texture = self.sprite_cache.atlas_texture(atlas_id).unwrap();
            ctx.command_encoder.set_fragment_texture(
                shaders::GPUISpriteFragmentInputIndex_GPUISpriteFragmentInputIndexAtlas as u64,
                Some(texture),
            );

            unsafe {
                let buffer_contents = (self.instances.contents() as *mut u8)
                    .offset(*offset as isize)
                    as *mut shaders::GPUISprite;
                std::ptr::copy_nonoverlapping(sprites.as_ptr(), buffer_contents, sprites.len());
            }
            self.instances.did_modify_range(NSRange {
                location: *offset as u64,
                length: (next_offset - *offset) as u64,
            });
            *offset = next_offset;

            ctx.command_encoder.draw_primitives_instanced(
                metal::MTLPrimitiveType::Triangle,
                0,
                6,
                sprites.len() as u64,
            );
        }
    }
}

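/// Rounds `offset` up to the next multiple of 256, the alignment Metal
/// requires for buffer offsets passed to `set_vertex_buffer` on macOS.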
fn align_offset(offset: &mut usize) {
    let r = *offset % 256;
    if r > 0 {
        *offset += 256 - r; // Round up to the next 256-byte boundary.
    }
}

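/// Looks up the named vertex and fragment functions in `library` and builds a
/// pipeline state targeting `pixel_format`, with conventional source-over
/// alpha blending for both the color and alpha channels.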
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> Result<metal::RenderPipelineState> {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .map_err(|message| anyhow!("error locating vertex function: {}", message))?;
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .map_err(|message| anyhow!("error locating fragment function: {}", message))?;

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);

    device
        .new_render_pipeline_state(&descriptor)
        .map_err(|message| anyhow!("could not create render pipeline state: {}", message))
}

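/// FFI types shared with the Metal shaders. The `include!` below pulls in
/// bindings generated by the build script; the conversion traits pack the
/// Rust-side geometry and color types into the layouts those bindings expect.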
mod shaders {
    #![allow(non_upper_case_globals)]
    #![allow(non_camel_case_types)]
    #![allow(non_snake_case)]

    use crate::{color::ColorU, geometry::vector::Vector2F};
    use pathfinder_geometry::vector::Vector2I;

    include!(concat!(env!("OUT_DIR"), "/shaders.rs"));

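    /// Conversion into the shader-side `vector_float2`. The generated binding
    /// is an integer wide enough to hold two packed `f32` bit patterns: `x`
    /// in the low 32 bits, `y` in the high 32 bits.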
    pub trait ToFloat2 {
        fn to_float2(&self) -> vector_float2;
    }

    pub trait ToUchar4 {
        fn to_uchar4(&self) -> vector_uchar4;
    }

    impl ToFloat2 for (f32, f32) {
        fn to_float2(&self) -> vector_float2 {
            // Pack the two f32 bit patterns into one value: y high, x low.
            // `to_bits` already yields the raw u32, so no transmute is needed.
            let mut output = self.1.to_bits() as vector_float2;
            output <<= 32;
            output |= self.0.to_bits() as vector_float2;
            output
        }
    }

    impl ToFloat2 for Vector2F {
        fn to_float2(&self) -> vector_float2 {
            // Delegate to the tuple impl rather than duplicating the packing.
            (self.x(), self.y()).to_float2()
        }
    }

    impl ToFloat2 for Vector2I {
        fn to_float2(&self) -> vector_float2 {
            self.to_f32().to_float2()
        }
    }

    impl ToUchar4 for ColorU {
        fn to_uchar4(&self) -> vector_uchar4 {
            // Pack the RGBA bytes with r in the low byte, then g, b, a.
            let mut vec = self.a as vector_uchar4;
            vec <<= 8;
            vec |= self.b as vector_uchar4;
            vec <<= 8;
            vec |= self.g as vector_uchar4;
            vec <<= 8;
            vec |= self.r as vector_uchar4;
            vec
        }
    }
}