// renderer.rs

  1use super::{sprite_cache::SpriteCache, window::RenderContext};
  2use crate::{
  3    color::ColorU,
  4    geometry::{
  5        rect::RectF,
  6        vector::{vec2f, vec2i, Vector2I},
  7    },
  8    platform,
  9    scene::Layer,
 10    Scene,
 11};
 12use anyhow::{anyhow, Result};
 13use cocoa::foundation::NSUInteger;
 14use metal::{MTLResourceOptions, NSRange};
 15use shaders::{ToFloat2 as _, ToUchar4 as _};
 16use std::{collections::HashMap, ffi::c_void, mem, sync::Arc};
 17
 18const SHADERS_METALLIB: &'static [u8] =
 19    include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
 20const INSTANCE_BUFFER_SIZE: usize = 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value.
 21
pub struct Renderer {
    // Glyph rasterization cache; owns the atlas textures sampled by the sprite pipeline.
    sprite_cache: SpriteCache,
    // One compiled pipeline per primitive kind rendered by this renderer.
    quad_pipeline_state: metal::RenderPipelineState,
    shadow_pipeline_state: metal::RenderPipelineState,
    sprite_pipeline_state: metal::RenderPipelineState,
    // Six vertices forming two triangles that cover the unit square;
    // every instanced draw stretches these to the instance's bounds.
    unit_vertices: metal::Buffer,
    // Shared CPU-writable buffer that all draws sub-allocate instance data
    // from within a frame (see the `offset` cursor in `render`).
    instances: metal::Buffer,
}
 30
impl Renderer {
    /// Creates a renderer for `device` targeting drawables of `pixel_format`.
    ///
    /// Compiles the embedded metallib into three render pipeline states
    /// (quads, shadows, glyph sprites), uploads the unit-square vertex
    /// buffer, and allocates the shared instance buffer.
    pub fn new(
        device: metal::Device,
        pixel_format: metal::MTLPixelFormat,
        fonts: Arc<dyn platform::FontSystem>,
    ) -> Result<Self> {
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .map_err(|message| anyhow!("error building metal library: {}", message))?;

        // Two triangles covering the unit square [0,1]x[0,1]; vertex shaders
        // scale/translate these per instance.
        let unit_vertices = [
            (0., 0.).to_float2(),
            (1., 0.).to_float2(),
            (0., 1.).to_float2(),
            (0., 1.).to_float2(),
            (1., 0.).to_float2(),
            (1., 1.).to_float2(),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<shaders::vector_float2>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        // Managed storage: CPU writes must be flushed with did_modify_range
        // before the GPU reads them (see the render_* methods).
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        // Initial glyph-atlas texture dimensions, in pixels.
        let atlas_size: Vector2I = vec2i(1024, 768);
        Ok(Self {
            sprite_cache: SpriteCache::new(device.clone(), atlas_size, fonts),
            quad_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "quad",
                "quad_vertex",
                "quad_fragment",
                pixel_format,
            )?,
            shadow_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "shadow",
                "shadow_vertex",
                "shadow_fragment",
                pixel_format,
            )?,
            sprite_pipeline_state: build_pipeline_state(
                &device,
                &library,
                "sprite",
                "sprite_vertex",
                "sprite_fragment",
                pixel_format,
            )?,
            unit_vertices,
            instances,
        })
    }

    /// Renders `scene` into the drawable described by `ctx`, one layer at a
    /// time (shadows, then quads, then glyph sprites within each layer).
    ///
    /// `offset` is a byte cursor into the shared `instances` buffer; it only
    /// advances across the whole frame, so all layers together must fit in
    /// `INSTANCE_BUFFER_SIZE` (enforced by asserts in the render_* methods).
    pub fn render(&mut self, scene: &Scene, ctx: &RenderContext) {
        ctx.command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: ctx.drawable_size.x() as f64,
            height: ctx.drawable_size.y() as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        let mut offset = 0;
        for layer in scene.layers() {
            self.clip(scene, layer, ctx);
            self.render_shadows(scene, layer, &mut offset, ctx);
            self.render_quads(scene, layer, &mut offset, ctx);
            self.render_sprites(scene, layer, &mut offset, ctx);
        }
    }

    /// Applies the layer's clip bounds as a scissor rect in device pixels.
    /// A layer without explicit clip bounds is clipped to the full drawable.
    fn clip(&mut self, scene: &Scene, layer: &Layer, ctx: &RenderContext) {
        // Clip bounds are in logical coordinates; scale to device pixels.
        let clip_bounds = layer.clip_bounds().unwrap_or(RectF::new(
            vec2f(0., 0.),
            ctx.drawable_size / scene.scale_factor(),
        )) * scene.scale_factor();
        ctx.command_encoder.set_scissor_rect(metal::MTLScissorRect {
            x: clip_bounds.origin_x() as NSUInteger,
            y: clip_bounds.origin_y() as NSUInteger,
            width: clip_bounds.width() as NSUInteger,
            height: clip_bounds.height() as NSUInteger,
        });
    }

    /// Draws the layer's drop shadows as one instanced draw call, writing
    /// one `GPUIShadow` per shadow into the shared instance buffer at
    /// `*offset` and advancing the cursor past the written range.
    fn render_shadows(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.shadows().is_empty() {
            return;
        }

        // Metal requires 256-byte-aligned buffer binding offsets.
        align_offset(offset);
        let next_offset = *offset + layer.shadows().len() * mem::size_of::<shaders::GPUIShadow>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.shadow_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        // The encoder captures this binding's offset now, so advancing
        // `*offset` afterwards does not affect the pending draw.
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexShadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        // SAFETY: the assert above guarantees all writes below stay within the
        // buffer's INSTANCE_BUFFER_SIZE bytes, and `*offset` was just aligned
        // to 256 (assumed to satisfy GPUIShadow's alignment — TODO confirm).
        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIShadow
        };
        for (ix, shadow) in layer.shadows().iter().enumerate() {
            // Scale logical coordinates to device pixels.
            let shape_bounds = shadow.bounds * scene.scale_factor();
            let shader_shadow = shaders::GPUIShadow {
                origin: shape_bounds.origin().to_float2(),
                size: shape_bounds.size().to_float2(),
                corner_radius: shadow.corner_radius * scene.scale_factor(),
                sigma: shadow.sigma,
                color: shadow.color.to_uchar4(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_shadow;
            }
        }

        // Flush the CPU-written range of the managed buffer to the GPU.
        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.shadows().len() as u64,
        );
    }

    /// Draws the layer's quads (background + border + corner radius) as one
    /// instanced draw call; same instance-buffer protocol as render_shadows.
    fn render_quads(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.quads().is_empty() {
            return;
        }
        // Metal requires 256-byte-aligned buffer binding offsets.
        align_offset(offset);
        let next_offset = *offset + layer.quads().len() * mem::size_of::<shaders::GPUIQuad>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.quad_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexQuads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        // SAFETY: the assert above bounds the writes; alignment of `*offset`
        // comes from align_offset (assumed sufficient for GPUIQuad — confirm).
        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIQuad
        };
        for (ix, quad) in layer.quads().iter().enumerate() {
            let bounds = quad.bounds * scene.scale_factor();
            let border_width = quad.border.width * scene.scale_factor();
            let shader_quad = shaders::GPUIQuad {
                origin: bounds.origin().to_float2(),
                size: bounds.size().to_float2(),
                background_color: quad
                    .background
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                // `bool as usize as f32` yields a 0.0/1.0 multiplier, so the
                // shared width applies only to the enabled edges.
                border_top: border_width * (quad.border.top as usize as f32),
                border_right: border_width * (quad.border.right as usize as f32),
                border_bottom: border_width * (quad.border.bottom as usize as f32),
                border_left: border_width * (quad.border.left as usize as f32),
                border_color: quad
                    .border
                    .color
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                corner_radius: quad.corner_radius * scene.scale_factor(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_quad;
            }
        }

        // Flush the CPU-written range of the managed buffer to the GPU.
        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.quads().len() as u64,
        );
    }

    /// Rasterizes the layer's glyphs into the sprite cache, groups the
    /// resulting sprites by atlas texture, and issues one instanced draw per
    /// atlas. Glyphs the cache declines to render are silently skipped.
    fn render_sprites(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.glyphs().is_empty() {
            return;
        }

        // A draw call can sample only one atlas texture, so bucket sprites
        // by the atlas they landed in.
        let mut sprites_by_atlas = HashMap::new();
        for glyph in layer.glyphs() {
            if let Some(sprite) = self.sprite_cache.render_glyph(
                glyph.font_id,
                glyph.font_size,
                glyph.id,
                glyph.origin,
                scene.scale_factor(),
            ) {
                sprites_by_atlas
                    .entry(sprite.atlas_id)
                    .or_insert_with(Vec::new)
                    .push(shaders::GPUISprite {
                        origin: (glyph.origin * scene.scale_factor() + sprite.offset).to_float2(),
                        size: sprite.size.to_float2(),
                        atlas_origin: sprite.atlas_origin.to_float2(),
                        color: glyph.color.to_uchar4(),
                    });
            }
        }

        // Bindings shared by every per-atlas draw below.
        ctx.command_encoder
            .set_render_pipeline_state(&self.sprite_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexViewportSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [ctx.drawable_size.to_float2()].as_ptr() as *const c_void,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexAtlasSize as u64,
            mem::size_of::<shaders::vector_float2>() as u64,
            [self.sprite_cache.atlas_size().to_float2()].as_ptr() as *const c_void,
        );

        for (atlas_id, sprites) in sprites_by_atlas {
            // Metal requires 256-byte-aligned buffer binding offsets.
            align_offset(offset);
            let next_offset = *offset + sprites.len() * mem::size_of::<shaders::GPUISprite>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            ctx.command_encoder.set_vertex_buffer(
                shaders::GPUISpriteVertexInputIndex_GPUISpriteVertexInputIndexSprites as u64,
                Some(&self.instances),
                *offset as u64,
            );

            // The atlas id came from render_glyph above, so the texture must
            // exist; a missing texture is a cache invariant violation.
            let texture = self.sprite_cache.atlas_texture(atlas_id).unwrap();
            ctx.command_encoder.set_fragment_texture(
                shaders::GPUISpriteFragmentInputIndex_GPUISpriteFragmentInputIndexAtlas as u64,
                Some(texture),
            );

            // SAFETY: the assert above bounds the copy; source and destination
            // cannot overlap because `sprites` is a separate heap allocation.
            unsafe {
                let buffer_contents = (self.instances.contents() as *mut u8)
                    .offset(*offset as isize)
                    as *mut shaders::GPUISprite;
                std::ptr::copy_nonoverlapping(sprites.as_ptr(), buffer_contents, sprites.len());
            }
            // Flush the CPU-written range of the managed buffer to the GPU.
            self.instances.did_modify_range(NSRange {
                location: *offset as u64,
                length: (next_offset - *offset) as u64,
            });
            *offset = next_offset;

            ctx.command_encoder.draw_primitives_instanced(
                metal::MTLPrimitiveType::Triangle,
                0,
                6,
                sprites.len() as u64,
            );
        }
    }
}
367
/// Rounds `offset` up to the next multiple of 256, the alignment Metal
/// requires for buffer binding offsets. Already-aligned values are unchanged.
fn align_offset(offset: &mut usize) {
    // 256 is a power of two, so masking off the low bits of (offset + 255)
    // rounds up to the next 256-byte boundary.
    *offset = (*offset + 255) & !255;
}
374
375fn build_pipeline_state(
376    device: &metal::DeviceRef,
377    library: &metal::LibraryRef,
378    label: &str,
379    vertex_fn_name: &str,
380    fragment_fn_name: &str,
381    pixel_format: metal::MTLPixelFormat,
382) -> Result<metal::RenderPipelineState> {
383    let vertex_fn = library
384        .get_function(vertex_fn_name, None)
385        .map_err(|message| anyhow!("error locating vertex function: {}", message))?;
386    let fragment_fn = library
387        .get_function(fragment_fn_name, None)
388        .map_err(|message| anyhow!("error locating fragment function: {}", message))?;
389
390    let descriptor = metal::RenderPipelineDescriptor::new();
391    descriptor.set_label(label);
392    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
393    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
394    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
395    color_attachment.set_pixel_format(pixel_format);
396    color_attachment.set_blending_enabled(true);
397    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
398    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
399    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
400    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::SourceAlpha);
401    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
402    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
403
404    device
405        .new_render_pipeline_state(&descriptor)
406        .map_err(|message| anyhow!("could not create render pipeline state: {}", message))
407}
408
409mod shaders {
410    #![allow(non_upper_case_globals)]
411    #![allow(non_camel_case_types)]
412    #![allow(non_snake_case)]
413
414    use pathfinder_geometry::vector::Vector2I;
415
416    use crate::{color::ColorU, geometry::vector::Vector2F};
417    use std::mem;
418
419    include!(concat!(env!("OUT_DIR"), "/shaders.rs"));
420
421    pub trait ToFloat2 {
422        fn to_float2(&self) -> vector_float2;
423    }
424
425    pub trait ToUchar4 {
426        fn to_uchar4(&self) -> vector_uchar4;
427    }
428
429    impl ToFloat2 for (f32, f32) {
430        fn to_float2(&self) -> vector_float2 {
431            unsafe {
432                let mut output = mem::transmute::<_, u32>(self.1.to_bits()) as vector_float2;
433                output <<= 32;
434                output |= mem::transmute::<_, u32>(self.0.to_bits()) as vector_float2;
435                output
436            }
437        }
438    }
439
440    impl ToFloat2 for Vector2F {
441        fn to_float2(&self) -> vector_float2 {
442            unsafe {
443                let mut output = mem::transmute::<_, u32>(self.y().to_bits()) as vector_float2;
444                output <<= 32;
445                output |= mem::transmute::<_, u32>(self.x().to_bits()) as vector_float2;
446                output
447            }
448        }
449    }
450
451    impl ToFloat2 for Vector2I {
452        fn to_float2(&self) -> vector_float2 {
453            self.to_f32().to_float2()
454        }
455    }
456
457    impl ToUchar4 for ColorU {
458        fn to_uchar4(&self) -> vector_uchar4 {
459            let mut vec = self.a as vector_uchar4;
460            vec <<= 8;
461            vec |= self.b as vector_uchar4;
462            vec <<= 8;
463            vec |= self.g as vector_uchar4;
464            vec <<= 8;
465            vec |= self.r as vector_uchar4;
466            vec
467        }
468    }
469}