renderer.rs

use self::shaders::{ToFloat2 as _, ToUchar4};
use super::window::RenderContext;
use crate::{color::ColorU, scene::Layer, Scene};
use anyhow::{anyhow, Result};
use metal::{MTLResourceOptions, NSRange};
use std::{ffi::c_void, mem};

const SHADERS_METALLIB: &[u8] =
    include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
const INSTANCE_BUFFER_SIZE: usize = 1024 * 1024; // This is an arbitrary decision. There's probably a more optimal value.

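/// Draws a [`Scene`] with Metal: one render pipeline per primitive kind (quads
/// and shadows), a vertex buffer holding the two triangles of a unit square,
/// and a single instance buffer that every draw within a `render` call shares.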
pub struct Renderer {
    quad_pipeline_state: metal::RenderPipelineState,
    shadow_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    instances: metal::Buffer,
}

impl Renderer {
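    /// Loads the precompiled `shaders.metallib` embedded at build time, uploads
    /// the six unit-square vertices, allocates the shared instance buffer, and
    /// builds the quad and shadow pipeline states for `pixel_format`.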
    pub fn new(device: &metal::DeviceRef, pixel_format: metal::MTLPixelFormat) -> Result<Self> {
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .map_err(|message| anyhow!("error building metal library: {}", message))?;

        let unit_vertices = [
            (0., 0.).to_float2(),
            (1., 0.).to_float2(),
            (0., 1.).to_float2(),
            (0., 1.).to_float2(),
            (1., 0.).to_float2(),
            (1., 1.).to_float2(),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<shaders::vector_float2>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        let instances = device.new_buffer(
            INSTANCE_BUFFER_SIZE as u64,
            MTLResourceOptions::StorageModeManaged,
        );

        Ok(Self {
            quad_pipeline_state: build_pipeline_state(
                device,
                &library,
                "quad",
                "quad_vertex",
                "quad_fragment",
                pixel_format,
            )?,
            shadow_pipeline_state: build_pipeline_state(
                device,
                &library,
                "shadow",
                "shadow_vertex",
                "shadow_fragment",
                pixel_format,
            )?,
            unit_vertices,
            instances,
        })
    }

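    /// Sets the viewport to the drawable size, then draws each layer's shadows
    /// and quads in order, packing their instance data into `self.instances`
    /// at increasing offsets.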
    pub fn render(&mut self, scene: &Scene, ctx: &RenderContext) {
        ctx.command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: ctx.drawable_size.x() as f64,
            height: ctx.drawable_size.y() as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        let mut offset = 0;
        for layer in scene.layers() {
            self.render_shadows(scene, layer, &mut offset, ctx);
            self.render_quads(scene, layer, &mut offset, ctx);
        }
    }

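    /// Writes this layer's shadows into the instance buffer at the next
    /// 256-byte-aligned offset and issues one instanced draw of the unit quad
    /// per shadow.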
    fn render_shadows(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.shadows().is_empty() {
            return;
        }

        align_offset(offset);
        let next_offset = *offset + layer.shadows().len() * mem::size_of::<shaders::GPUIShadow>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.shadow_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexShadows as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

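        // Write shadow instances directly into the CPU side of the managed
        // instance buffer; `did_modify_range` below tells Metal which bytes
        // changed and need to be synced to the GPU.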
        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIShadow
        };
        for (ix, shadow) in layer.shadows().iter().enumerate() {
            let shape_bounds = shadow.bounds * scene.scale_factor();
            let shader_shadow = shaders::GPUIShadow {
                origin: shape_bounds.origin().to_float2(),
                size: shape_bounds.size().to_float2(),
                corner_radius: shadow.corner_radius * scene.scale_factor(),
                sigma: shadow.sigma,
                color: shadow.color.to_uchar4(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_shadow;
            }
        }

        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.shadows().len() as u64,
        );
    }

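    /// Writes this layer's quads into the instance buffer at the next
    /// 256-byte-aligned offset and issues one instanced draw of the unit quad
    /// per quad.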
    fn render_quads(
        &mut self,
        scene: &Scene,
        layer: &Layer,
        offset: &mut usize,
        ctx: &RenderContext,
    ) {
        if layer.quads().is_empty() {
            return;
        }

        align_offset(offset);
        let next_offset = *offset + layer.quads().len() * mem::size_of::<shaders::GPUIQuad>();
        assert!(
            next_offset <= INSTANCE_BUFFER_SIZE,
            "instance buffer exhausted"
        );

        ctx.command_encoder
            .set_render_pipeline_state(&self.quad_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexQuads as u64,
            Some(&self.instances),
            *offset as u64,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

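        // Convert each quad to its GPU representation, scaling positions,
        // border widths, and corner radii to device pixels; each side's border
        // width is multiplied by its enabled flag, so disabled sides get zero.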
        let buffer_contents = unsafe {
            (self.instances.contents() as *mut u8).offset(*offset as isize)
                as *mut shaders::GPUIQuad
        };
        for (ix, quad) in layer.quads().iter().enumerate() {
            let bounds = quad.bounds * scene.scale_factor();
            let border_width = quad.border.width * scene.scale_factor();
            let shader_quad = shaders::GPUIQuad {
                origin: bounds.origin().to_float2(),
                size: bounds.size().to_float2(),
                background_color: quad
                    .background
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                border_top: border_width * (quad.border.top as usize as f32),
                border_right: border_width * (quad.border.right as usize as f32),
                border_bottom: border_width * (quad.border.bottom as usize as f32),
                border_left: border_width * (quad.border.left as usize as f32),
                border_color: quad
                    .border
                    .color
                    .unwrap_or(ColorU::transparent_black())
                    .to_uchar4(),
                corner_radius: quad.corner_radius * scene.scale_factor(),
            };
            unsafe {
                *(buffer_contents.offset(ix as isize)) = shader_quad;
            }
        }

        self.instances.did_modify_range(NSRange {
            location: *offset as u64,
            length: (next_offset - *offset) as u64,
        });
        *offset = next_offset;

        ctx.command_encoder.draw_primitives_instanced(
            metal::MTLPrimitiveType::Triangle,
            0,
            6,
            layer.quads().len() as u64,
        );
    }
}

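/// Rounds `offset` up to the next multiple of 256, since Metal on macOS expects
/// 256-byte-aligned buffer offsets for buffers bound in the constant address
/// space.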
fn align_offset(offset: &mut usize) {
    let r = *offset % 256;
    if r > 0 {
        *offset += 256 - r; // Align to a multiple of 256 to make Metal happy
    }
}

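/// Builds a render pipeline state for the named vertex and fragment functions,
/// targeting `pixel_format` with alpha blending enabled (source alpha /
/// one-minus-source-alpha on both color and alpha).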
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> Result<metal::RenderPipelineState> {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .map_err(|message| anyhow!("error locating vertex function: {}", message))?;
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .map_err(|message| anyhow!("error locating fragment function: {}", message))?;

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);

    device
        .new_render_pipeline_state(&descriptor)
        .map_err(|message| anyhow!("could not create render pipeline state: {}", message))
}

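/// Types shared with the Metal shaders (generated into `$OUT_DIR/shaders.rs` at
/// build time), plus helpers that pack colors and vectors into the bit layouts
/// those generated types expect.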
mod shaders {
    #![allow(non_upper_case_globals)]
    #![allow(non_camel_case_types)]
    #![allow(non_snake_case)]

    use crate::{color::ColorU, geometry::vector::Vector2F};

    include!(concat!(env!("OUT_DIR"), "/shaders.rs"));

    pub trait ToFloat2 {
        fn to_float2(&self) -> vector_float2;
    }

    pub trait ToUchar4 {
        fn to_uchar4(&self) -> vector_uchar4;
    }

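    // In the generated bindings, `vector_float2` is a plain integer wide enough
    // for two f32 bit patterns: x goes in the low 32 bits, y in the high 32.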
    impl ToFloat2 for (f32, f32) {
        fn to_float2(&self) -> vector_float2 {
            let mut output = self.1.to_bits() as vector_float2;
            output <<= 32;
            output |= self.0.to_bits() as vector_float2;
            output
        }
    }

    impl ToFloat2 for Vector2F {
        fn to_float2(&self) -> vector_float2 {
            let mut output = self.y().to_bits() as vector_float2;
            output <<= 32;
            output |= self.x().to_bits() as vector_float2;
            output
        }
    }

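    // Packs the four color channels into `vector_uchar4`, one byte per channel,
    // with red in the lowest byte and alpha in the highest.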
    impl ToUchar4 for ColorU {
        fn to_uchar4(&self) -> vector_uchar4 {
            let mut vec = self.a as vector_uchar4;
            vec <<= 8;
            vec |= self.b as vector_uchar4;
            vec <<= 8;
            vec |= self.g as vector_uchar4;
            vec <<= 8;
            vec |= self.r as vector_uchar4;
            vec
        }
    }
}