renderer.rs

use std::{collections::HashMap, ffi::c_void, mem};

use anyhow::{anyhow, Result};
use metal::{MTLResourceOptions, NSRange};

use self::shaders::{ToFloat2 as _, ToUchar4 as _};
// NOTE: the module path for `SpriteCache` is an assumption; the type is defined
// elsewhere in this crate.
use super::sprite_cache::SpriteCache;
use super::window::RenderContext;
use crate::{color::ColorU, scene::Layer, Scene};

const SHADERS_METALLIB: &'static [u8] =
    include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
const INSTANCE_BUFFER_SIZE: usize = 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value.

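/// Draws a `Scene` with Metal. Each layer's shadows, quads, and glyph sprites are
/// rendered as instanced draws whose per-instance data is streamed through a
/// single shared, CPU-visible instance buffer.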
pub struct Renderer {
    quad_pipeline_state: metal::RenderPipelineState,
    shadow_pipeline_state: metal::RenderPipelineState,
    sprite_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    instances: metal::Buffer,
    sprite_cache: SpriteCache,
}

impl Renderer {
    pub fn new(device: &metal::DeviceRef, pixel_format: metal::MTLPixelFormat) -> Result<Self> {
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .map_err(|message| anyhow!("error building metal library: {}", message))?;

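        // A unit quad expressed as two triangles; the vertex shaders scale and
        // translate these unit vertices using each instance's origin and size.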
        let unit_vertices = [
            (0., 0.).to_float2(),
            (1., 0.).to_float2(),
            (0., 1.).to_float2(),
            (0., 1.).to_float2(),
            (1., 0.).to_float2(),
            (1., 1.).to_float2(),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<shaders::vector_float2>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        Ok(Self {
            quad_pipeline_state: build_pipeline_state(
                device,
                &library,
                "quad",
                "quad_vertex",
                "quad_fragment",
                pixel_format,
            )?,
            shadow_pipeline_state: build_pipeline_state(
                device,
                &library,
                "shadow",
                "shadow_vertex",
                "shadow_fragment",
                pixel_format,
            )?,
            sprite_pipeline_state: build_pipeline_state(
                device,
                &library,
                "sprite",
                "sprite_vertex",
                "sprite_fragment",
                pixel_format,
            )?,
            unit_vertices,
            instances,
            // NOTE: `SpriteCache::new(device)` is an assumed constructor signature;
            // adjust to match the actual SpriteCache API.
            sprite_cache: SpriteCache::new(device),
        })
    }

    pub fn render(&mut self, scene: &Scene, ctx: &RenderContext) {
        ctx.command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: ctx.drawable_size.x() as f64,
            height: ctx.drawable_size.y() as f64,
            znear: 0.0,
            zfar: 1.0,
        });

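        // `offset` is a byte cursor into the shared instance buffer; each render_*
        // call below appends its instance data at an aligned offset and advances it.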
        let mut offset = 0;
        for layer in scene.layers() {
            self.render_shadows(scene, layer, &mut offset, ctx);
            self.render_quads(scene, layer, &mut offset, ctx);
            self.render_sprites(scene, layer, &mut offset, ctx);
        }
    }

    fn render_shadows(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.shadows().is_empty() {
            return;
        }

        align_offset(offset);
        let next_offset = *offset + layer.shadows().len() * mem::size_of::<shaders::GPUIShadow>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.shadow_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexShadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

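        // `instances` was created with StorageModeManaged, so its contents are
        // CPU-visible: instance data is written straight into the buffer here and
        // the modified range is flushed to the GPU with did_modify_range below.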
        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIShadow
        };
        for (ix, shadow) in layer.shadows().iter().enumerate() {
            let shape_bounds = shadow.bounds * scene.scale_factor();
            let shader_shadow = shaders::GPUIShadow {
                origin: shape_bounds.origin().to_float2(),
                size: shape_bounds.size().to_float2(),
                corner_radius: shadow.corner_radius * scene.scale_factor(),
                sigma: shadow.sigma,
                color: shadow.color.to_uchar4(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_shadow;
            }
        }

        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.shadows().len() as u64,
        );
    }

    fn render_quads(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.quads().is_empty() {
            return;
        }
        align_offset(offset);
        let next_offset = *offset + layer.quads().len() * mem::size_of::<shaders::GPUIQuad>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.quad_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexQuads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIQuad
        };
        for (ix, quad) in layer.quads().iter().enumerate() {
            let bounds = quad.bounds * scene.scale_factor();
            let border_width = quad.border.width * scene.scale_factor();
            let shader_quad = shaders::GPUIQuad {
                origin: bounds.origin().to_float2(),
                size: bounds.size().to_float2(),
                background_color: quad
                    .background
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                border_top: border_width * (quad.border.top as usize as f32),
                border_right: border_width * (quad.border.right as usize as f32),
                border_bottom: border_width * (quad.border.bottom as usize as f32),
                border_left: border_width * (quad.border.left as usize as f32),
                border_color: quad
                    .border
                    .color
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                corner_radius: quad.corner_radius * scene.scale_factor(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_quad;
            }
        }

        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.quads().len() as u64,
        );
    }

    fn render_sprites(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.glyphs().is_empty() {
            return;
        }

        align_offset(offset);
        let next_offset = *offset + layer.glyphs().len() * mem::size_of::<shaders::GPUISprite>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

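        // Rasterize each glyph through the sprite cache and group the resulting
        // sprites by their atlas, so each atlas can be drawn as its own instanced
        // batch below.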
        let mut sprites = HashMap::new();
        for glyph in layer.glyphs() {
            let (atlas, bounds) =
                self.sprite_cache
                    .rasterize_glyph(glyph.font_id, glyph.font_size, glyph.glyph_id);
            sprites
                .entry(atlas)
                .or_insert_with(Vec::new)
                .push(shaders::GPUISprite {
                    origin: glyph.origin.to_float2(),
                    size: bounds.size().to_float2(),
                    atlas_origin: bounds.origin().to_float2(),
                    color: glyph.color.to_uchar4(),
                });
        }

        ctx.command_encoder
            .set_render_pipeline_state(&self.sprite_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUISpriteInputIndex_GPUISpriteInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUISpriteInputIndex_GPUISpriteInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

        // Sketch of the sprite upload and draw path, mirroring render_shadows and
        // render_quads above: copy each atlas's sprites into the instance buffer at
        // an aligned offset and draw them as one instanced batch.
        for (_atlas, group) in sprites {
            align_offset(offset);
            let next_offset = *offset + group.len() * mem::size_of::<shaders::GPUISprite>();
            assert!(
                next_offset <= INSTANCE_BUFFER_SIZE,
                "instance buffer exhausted"
            );

            ctx.command_encoder.set_vertex_buffer(
                shaders::GPUISpriteInputIndex_GPUISpriteInputIndexSprites as u64,
                Some(&self.instances),
                *offset as u64,
            );

            // TODO: bind `_atlas`'s texture for the sprite fragment shader before
            // drawing. The SpriteCache texture-lookup API is not visible in this
            // file, so that call is intentionally not sketched here.

            let buffer_contents = unsafe {
                (self.instances.contents() as *mut u8).offset(*offset as isize)
                    as *mut shaders::GPUISprite
            };
            let instance_count = group.len();
            for (ix, sprite) in group.into_iter().enumerate() {
                unsafe {
                    *buffer_contents.offset(ix as isize) = sprite;
                }
            }

            self.instances.did_modify_range(NSRange {
                location: *offset as u64,
                length: (next_offset - *offset) as u64,
            });
            *offset = next_offset;

            ctx.command_encoder.draw_primitives_instanced(
                metal::MTLPrimitiveType::Triangle,
                0,
                6,
                instance_count as u64,
            );
        }
    }
}

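/// Rounds `offset` up to the next multiple of 256 bytes. Metal requires buffer
/// offsets passed to set_vertex_buffer to be 256-byte aligned on macOS (for
/// buffers read from the constant address space), so each batch of instance data
/// starts on a 256-byte boundary.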
fn align_offset(offset: &mut usize) {
    let r = *offset % 256;
    if r > 0 {
        *offset += 256 - r; // Align to a multiple of 256 to make Metal happy
    }
}

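/// Builds a render pipeline for one primitive type, wiring the named vertex and
/// fragment functions from the compiled shader library to the target pixel format
/// and enabling standard source-over alpha blending.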
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> Result<metal::RenderPipelineState> {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .map_err(|message| anyhow!("error locating vertex function: {}", message))?;
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .map_err(|message| anyhow!("error locating fragment function: {}", message))?;

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);

    device
        .new_render_pipeline_state(&descriptor)
        .map_err(|message| anyhow!("could not create render pipeline state: {}", message))
}

mod shaders {
    #![allow(non_upper_case_globals)]
    #![allow(non_camel_case_types)]
    #![allow(non_snake_case)]

    use crate::{color::ColorU, geometry::vector::Vector2F};

    include!(concat!(env!("OUT_DIR"), "/shaders.rs"));

    pub trait ToFloat2 {
        fn to_float2(&self) -> vector_float2;
    }

    pub trait ToUchar4 {
        fn to_uchar4(&self) -> vector_uchar4;
    }

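    // The generated bindings expose `vector_float2` and `vector_uchar4` as plain
    // integer types, so these conversions pack the components with bit shifts:
    // x in the low 32 bits and y in the high 32 bits for float2, and r, g, b, a
    // in ascending bytes for uchar4.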
    impl ToFloat2 for (f32, f32) {
        fn to_float2(&self) -> vector_float2 {
            let mut output = self.1.to_bits() as vector_float2;
            output <<= 32;
            output |= self.0.to_bits() as vector_float2;
            output
        }
    }

    impl ToFloat2 for Vector2F {
        fn to_float2(&self) -> vector_float2 {
            let mut output = self.y().to_bits() as vector_float2;
            output <<= 32;
            output |= self.x().to_bits() as vector_float2;
            output
        }
    }

    impl ToUchar4 for ColorU {
        fn to_uchar4(&self) -> vector_uchar4 {
            let mut vec = self.a as vector_uchar4;
            vec <<= 8;
            vec |= self.b as vector_uchar4;
            vec <<= 8;
            vec |= self.g as vector_uchar4;
            vec <<= 8;
            vec |= self.r as vector_uchar4;
            vec
        }
    }
}