renderer.rs

use super::{sprite_cache::SpriteCache, window::RenderContext};
use crate::{
    color::ColorU,
    geometry::{
        rect::RectF,
        vector::{vec2f, vec2i, Vector2I},
    },
    platform,
    scene::Layer,
    Scene,
};
use anyhow::{anyhow, Result};
use cocoa::foundation::NSUInteger;
use metal::{MTLResourceOptions, NSRange};
use shaders::{ToFloat2 as _, ToUchar4 as _};
use std::{collections::HashMap, ffi::c_void, mem, sync::Arc};

const SHADERS_METALLIB: &'static [u8] =
    include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
const INSTANCE_BUFFER_SIZE: usize = 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value.

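/// Metal-backed renderer for a `Scene`.
///
/// Holds one render pipeline per primitive kind (quads, shadows, and glyph
/// sprites), a shared unit-quad vertex buffer, and a single managed instance
/// buffer that every layer sub-allocates from while a frame is encoded.
///
/// A minimal usage sketch; the device, pixel format, font system, scene, and
/// render context are hypothetical values provided by the surrounding
/// platform/window layer:
///
/// ```ignore
/// let mut renderer = Renderer::new(device, pixel_format, fonts)?;
/// renderer.render(&scene, &render_context);
/// ```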
pub struct Renderer {
    sprite_cache: SpriteCache,
    quad_pipeline_state: metal::RenderPipelineState,
    shadow_pipeline_state: metal::RenderPipelineState,
    sprite_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    instances: metal::Buffer,
}

impl Renderer {
    pub fn new(
        device: metal::Device,
        pixel_format: metal::MTLPixelFormat,
        fonts: Arc<dyn platform::FontSystem>,
    ) -> Result<Self> {
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .map_err(|message| anyhow!("error building metal library: {}", message))?;

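        // Two triangles covering the unit square. Every primitive below is
        // drawn as an instanced draw of these six vertices, which the vertex
        // shaders presumably scale and translate per instance.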
        let unit_vertices = [
            (0., 0.).to_float2(),
            (1., 0.).to_float2(),
            (0., 1.).to_float2(),
            (0., 1.).to_float2(),
            (1., 0.).to_float2(),
            (1., 1.).to_float2(),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<shaders::vector_float2>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        let atlas_size: Vector2I = vec2i(1024, 768);
        Ok(Self {
            sprite_cache: SpriteCache::new(device.clone(), atlas_size, fonts),
            quad_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "quad",
                "quad_vertex",
                "quad_fragment",
                pixel_format,
            )?,
            shadow_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "shadow",
                "shadow_vertex",
                "shadow_fragment",
                pixel_format,
            )?,
            sprite_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "sprite",
                "sprite_vertex",
                "sprite_fragment",
                pixel_format,
            )?,
            unit_vertices,
            instances,
        })
    }

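    /// Encodes the scene into the command encoder, one layer at a time. All
    /// primitive kinds share `self.instances`; `offset` tracks how many bytes
    /// of that buffer have been consumed so far in this frame.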
    pub fn render(&mut self, scene: &Scene, ctx: &RenderContext) {
        ctx.command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: ctx.drawable_size.x() as f64,
            height: ctx.drawable_size.y() as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        let mut offset = 0;
        for layer in scene.layers() {
            self.clip(scene, layer, ctx);
            self.render_shadows(scene, layer, &mut offset, ctx);
            self.render_quads(scene, layer, &mut offset, ctx);
            self.render_sprites(scene, layer, &mut offset, ctx);
        }
    }

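    /// Restricts subsequent draws to the layer's clip bounds by setting the
    /// scissor rect, falling back to the full drawable when the layer has
    /// none. Bounds are in layout coordinates, so they are scaled to device
    /// pixels first.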
    fn clip(&mut self, scene: &Scene, layer: &Layer, ctx: &RenderContext) {
        let clip_bounds = layer.clip_bounds().unwrap_or(RectF::new(
            vec2f(0., 0.),
            ctx.drawable_size / scene.scale_factor(),
        )) * scene.scale_factor();
        ctx.command_encoder.set_scissor_rect(metal::MTLScissorRect {
            x: clip_bounds.origin_x() as NSUInteger,
            y: clip_bounds.origin_y() as NSUInteger,
            width: clip_bounds.width() as NSUInteger,
            height: clip_bounds.height() as NSUInteger,
        });
    }

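    /// Draws the layer's shadows with a single instanced draw of the unit
    /// quad. Instance data is written directly into `self.instances` at the
    /// next 256-byte-aligned offset; `did_modify_range` is needed because the
    /// buffer uses `StorageModeManaged`, so Metal must be told which bytes
    /// the CPU modified before the GPU reads them.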
    fn render_shadows(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.shadows().is_empty() {
            return;
        }

        align_offset(offset);
        let next_offset = *offset + layer.shadows().len() * mem::size_of::<shaders::GPUIShadow>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.shadow_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexShadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIShadow
        };
        for (ix, shadow) in layer.shadows().iter().enumerate() {
            let shape_bounds = shadow.bounds * scene.scale_factor();
            let shader_shadow = shaders::GPUIShadow {
                origin: shape_bounds.origin().to_float2(),
                size: shape_bounds.size().to_float2(),
                corner_radius: shadow.corner_radius * scene.scale_factor(),
                sigma: shadow.sigma,
                color: shadow.color.to_uchar4(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_shadow;
            }
        }

        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.shadows().len() as u64,
        );
    }

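    /// Same pattern as `render_shadows`, but for quads: each instance carries
    /// its background color, per-edge border widths, border color, and corner
    /// radius, all scaled to device pixels.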
    fn render_quads(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.quads().is_empty() {
            return;
        }
        align_offset(offset);
        let next_offset = *offset + layer.quads().len() * mem::size_of::<shaders::GPUIQuad>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.quad_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexQuads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIQuad
        };
        for (ix, quad) in layer.quads().iter().enumerate() {
            let bounds = quad.bounds * scene.scale_factor();
            let border_width = quad.border.width * scene.scale_factor();
            let shader_quad = shaders::GPUIQuad {
                origin: bounds.origin().to_float2(),
                size: bounds.size().to_float2(),
                background_color: quad
                    .background
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                border_top: border_width * (quad.border.top as usize as f32),
                border_right: border_width * (quad.border.right as usize as f32),
                border_bottom: border_width * (quad.border.bottom as usize as f32),
                border_left: border_width * (quad.border.left as usize as f32),
                border_color: quad
                    .border
                    .color
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                corner_radius: quad.corner_radius * scene.scale_factor(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_quad;
            }
        }

        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.quads().len() as u64,
        );
    }

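    /// Draws the layer's glyphs. Each glyph is rasterized (and cached) into an
    /// atlas texture by the `SpriteCache`; sprites are then grouped by atlas
    /// id so that each group can bind its texture and be drawn with a single
    /// instanced draw call.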
    fn render_sprites(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.glyphs().is_empty() {
            return;
        }

        let mut sprites_by_atlas = HashMap::new();
        for glyph in layer.glyphs() {
            if let Some(sprite) = self.sprite_cache.render_glyph(
                glyph.font_id,
                glyph.font_size,
                glyph.id,
                glyph.origin,
                scene.scale_factor(),
            ) {
                // Snap sprite to pixel grid.
                let origin = (glyph.origin * scene.scale_factor()).floor() + sprite.offset.to_f32();
                sprites_by_atlas
                    .entry(sprite.atlas_id)
                    .or_insert_with(Vec::new)
                    .push(shaders::GPUISprite {
                        origin: origin.to_float2(),
                        size: sprite.size.to_float2(),
                        atlas_origin: sprite.atlas_origin.to_float2(),
                        color: glyph.color.to_uchar4(),
                    });
            }
        }

        ctx.command_encoder
            .set_render_pipeline_state(&self.sprite_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexViewportSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [ctx.drawable_size.to_float2()].as_ptr() as *const c_void,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexAtlasSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [self.sprite_cache.atlas_size().to_float2()].as_ptr() as *const c_void,
        );

        for (atlas_id, sprites) in sprites_by_atlas {
            align_offset(offset);
            let next_offset = *offset + sprites.len() * mem::size_of::<shaders::GPUISprite>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            ctx.command_encoder.set_vertex_buffer(
                shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexSprites as u64,
                Some(&self.instances),
                *offset as u64,
            );

            let texture = self.sprite_cache.atlas_texture(atlas_id).unwrap();
            ctx.command_encoder.set_fragment_texture(
                shaders::GPUISpriteFragmentInputIndex_GPUISpriteFragmentInputIndexAtlas as u64,
                Some(texture),
            );

            unsafe {
                let buffer_contents = (self.instances.contents() as *mut u8)
                    .offset(*offset as isize)
                    as *mut shaders::GPUISprite;
                std::ptr::copy_nonoverlapping(sprites.as_ptr(), buffer_contents, sprites.len());
            }
            self.instances.did_modify_range(NSRange {
                location: *offset as u64,
                length: (next_offset - *offset) as u64,
            });
            *offset = next_offset;

            ctx.command_encoder.draw_primitives_instanced(
                metal::MTLPrimitiveType::Triangle,
                0,
                6,
                sprites.len() as u64,
            );
        }
    }
}

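/// Rounds `offset` up to the next multiple of 256. Metal requires buffer
/// offsets bound via `set_vertex_buffer` to be 256-byte aligned on macOS (at
/// least for buffers in the constant address space), so every batch of
/// instance data starts on a 256-byte boundary of the shared buffer.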
fn align_offset(offset: &mut usize) {
    let r = *offset % 256;
    if r > 0 {
        *offset += 256 - r; // Align to a multiple of 256 to make Metal happy
    }
}

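/// Looks up the named vertex and fragment functions in the compiled shader
/// library and builds a render pipeline whose color attachment uses standard
/// source-over alpha blending (source alpha / one-minus-source-alpha).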
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> Result<metal::RenderPipelineState> {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .map_err(|message| anyhow!("error locating vertex function: {}", message))?;
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .map_err(|message| anyhow!("error locating fragment function: {}", message))?;

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);

    device
        .new_render_pipeline_state(&descriptor)
        .map_err(|message| anyhow!("could not create render pipeline state: {}", message))
}

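/// Types shared with the Metal shaders. `shaders.rs` is generated into
/// `OUT_DIR` at build time; the generated `vector_float2` and `vector_uchar4`
/// types are plain integers on the Rust side, so the conversion traits below
/// bit-pack values into them: y/x into the high/low 32 bits of a
/// `vector_float2`, and a, b, g, r into the bytes of a `vector_uchar4` from
/// most to least significant.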
mod shaders {
    #![allow(non_upper_case_globals)]
    #![allow(non_camel_case_types)]
    #![allow(non_snake_case)]

    use pathfinder_geometry::vector::Vector2I;

    use crate::{color::ColorU, geometry::vector::Vector2F};

    include!(concat!(env!("OUT_DIR"), "/shaders.rs"));

    pub trait ToFloat2 {
        fn to_float2(&self) -> vector_float2;
    }

    pub trait ToUchar4 {
        fn to_uchar4(&self) -> vector_uchar4;
    }

    impl ToFloat2 for (f32, f32) {
        fn to_float2(&self) -> vector_float2 {
            let mut output = self.1.to_bits() as vector_float2;
            output <<= 32;
            output |= self.0.to_bits() as vector_float2;
            output
        }
    }

    impl ToFloat2 for Vector2F {
        fn to_float2(&self) -> vector_float2 {
            let mut output = self.y().to_bits() as vector_float2;
            output <<= 32;
            output |= self.x().to_bits() as vector_float2;
            output
        }
    }

    impl ToFloat2 for Vector2I {
        fn to_float2(&self) -> vector_float2 {
            self.to_f32().to_float2()
        }
    }

    impl ToUchar4 for ColorU {
        fn to_uchar4(&self) -> vector_uchar4 {
            let mut vec = self.a as vector_uchar4;
            vec <<= 8;
            vec |= self.b as vector_uchar4;
            vec <<= 8;
            vec |= self.g as vector_uchar4;
            vec <<= 8;
            vec |= self.r as vector_uchar4;
            vec
        }
    }
}