use std::{ffi::c_void, mem};

use self::shaders::ToUchar4;

use super::window::RenderContext;
use crate::{color::ColorU, scene::Layer, Scene};
use anyhow::{anyhow, Result};
use metal::{MTLResourceOptions, NSRange};
use shaders::ToFloat2 as _;

const SHADERS_METALLIB: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/shaders.metallib"));
const INSTANCE_BUFFER_SIZE: u64 = 1024 * 1024;

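/// Metal renderer for a `Scene`: owns the compiled render pipelines plus the
/// shared unit-quad vertex buffer and the reusable instance buffer.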
pub struct Renderer {
    quad_pipeline_state: metal::RenderPipelineState,
    shadow_pipeline_state: metal::RenderPipelineState,
    unit_vertices: metal::Buffer,
    instances: metal::Buffer,
}

impl Renderer {
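    /// Compiles the embedded `shaders.metallib`, uploads the unit-quad vertices,
    /// and builds the quad and shadow render pipelines for the given pixel format.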
    pub fn new(device: &metal::DeviceRef, pixel_format: metal::MTLPixelFormat) -> Result<Self> {
        let library = device
            .new_library_with_data(SHADERS_METALLIB)
            .map_err(|message| anyhow!("error building metal library: {}", message))?;

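        // Vertices of a unit square as two triangles; bound as the shared vertex
        // buffer for all instanced draws.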
        let unit_vertices = [
            (0., 0.).to_float2(),
            (1., 0.).to_float2(),
            (0., 1.).to_float2(),
            (0., 1.).to_float2(),
            (1., 0.).to_float2(),
            (1., 1.).to_float2(),
        ];
        let unit_vertices = device.new_buffer_with_data(
            unit_vertices.as_ptr() as *const c_void,
            (unit_vertices.len() * mem::size_of::<shaders::vector_float2>()) as u64,
            MTLResourceOptions::StorageModeManaged,
        );
        let instances =
            device.new_buffer(INSTANCE_BUFFER_SIZE, MTLResourceOptions::StorageModeManaged);

        Ok(Self {
            quad_pipeline_state: build_pipeline_state(
                device,
                &library,
                "quad",
                "quad_vertex",
                "quad_fragment",
                pixel_format,
            )?,
            shadow_pipeline_state: build_pipeline_state(
                device,
                &library,
                "shadow",
                "shadow_vertex",
                "shadow_fragment",
                pixel_format,
            )?,
            unit_vertices,
            instances,
        })
    }

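    /// Encodes one frame: sets the viewport to the drawable size and draws the
    /// shadows and quads of every layer in the scene.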
    pub fn render(&mut self, scene: &Scene, ctx: &RenderContext) {
        ctx.command_encoder.set_viewport(metal::MTLViewport {
            originX: 0.0,
            originY: 0.0,
            width: ctx.drawable_size.x() as f64,
            height: ctx.drawable_size.y() as f64,
            znear: 0.0,
            zfar: 1.0,
        });

        for layer in scene.layers() {
            self.render_shadows(scene, layer, ctx);
            self.render_quads(scene, layer, ctx);
        }
    }

    fn render_shadows(&mut self, scene: &Scene, layer: &Layer, ctx: &RenderContext) {
        ctx.command_encoder
            .set_render_pipeline_state(&self.shadow_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexShadows as u64,
            Some(&self.instances),
            0,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIShadowInputIndex_GPUIShadowInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

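        // The instance buffer holds a fixed number of shadows, so copy and draw
        // them in batches that fit within its length.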
        let batch_size = self.instances.length() as usize / mem::size_of::<shaders::GPUIShadow>();

        let buffer_contents = self.instances.contents() as *mut shaders::GPUIShadow;
        for shadow_batch in layer.shadows().chunks(batch_size) {
            for (ix, shadow) in shadow_batch.iter().enumerate() {
                let shape_bounds = shadow.bounds * scene.scale_factor();
                let shader_shadow = shaders::GPUIShadow {
                    origin: shape_bounds.origin().to_float2(),
                    size: shape_bounds.size().to_float2(),
                    corner_radius: shadow.corner_radius,
                    sigma: shadow.sigma,
                    color: shadow.color.to_uchar4(),
                };
                unsafe {
                    *buffer_contents.add(ix) = shader_shadow;
                }
            }
            self.instances.did_modify_range(NSRange {
                location: 0,
                length: (shadow_batch.len() * mem::size_of::<shaders::GPUIShadow>()) as u64,
            });

            ctx.command_encoder.draw_primitives_instanced(
                metal::MTLPrimitiveType::Triangle,
                0,
                6,
                shadow_batch.len() as u64,
            );
        }
    }

    fn render_quads(&mut self, scene: &Scene, layer: &Layer, ctx: &RenderContext) {
        ctx.command_encoder
            .set_render_pipeline_state(&self.quad_pipeline_state);
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexVertices as u64,
            Some(&self.unit_vertices),
            0,
        );
        ctx.command_encoder.set_vertex_buffer(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexQuads as u64,
            Some(&self.instances),
            0,
        );
        ctx.command_encoder.set_vertex_bytes(
            shaders::GPUIQuadInputIndex_GPUIQuadInputIndexUniforms as u64,
            mem::size_of::<shaders::GPUIUniforms>() as u64,
            [shaders::GPUIUniforms {
                viewport_size: ctx.drawable_size.to_float2(),
            }]
            .as_ptr() as *const c_void,
        );

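        // As with shadows, quads are written into the shared instance buffer and
        // drawn in batches sized to fit it.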
        let batch_size = self.instances.length() as usize / mem::size_of::<shaders::GPUIQuad>();

        let buffer_contents = self.instances.contents() as *mut shaders::GPUIQuad;
        for quad_batch in layer.quads().chunks(batch_size) {
            for (ix, quad) in quad_batch.iter().enumerate() {
                let bounds = quad.bounds * scene.scale_factor();
                let border_width = quad.border.width * scene.scale_factor();
                let shader_quad = shaders::GPUIQuad {
                    origin: bounds.origin().to_float2(),
                    size: bounds.size().to_float2(),
                    background_color: quad
                        .background
                        .unwrap_or(ColorU::transparent_black())
                        .to_uchar4(),
                    border_top: border_width * (quad.border.top as usize as f32),
                    border_right: border_width * (quad.border.right as usize as f32),
                    border_bottom: border_width * (quad.border.bottom as usize as f32),
                    border_left: border_width * (quad.border.left as usize as f32),
                    border_color: quad
                        .border
                        .color
                        .unwrap_or(ColorU::transparent_black())
                        .to_uchar4(),
                    corner_radius: quad.corner_radius * scene.scale_factor(),
                };
                unsafe {
                    *buffer_contents.add(ix) = shader_quad;
                }
            }
            self.instances.did_modify_range(NSRange {
                location: 0,
                length: (quad_batch.len() * mem::size_of::<shaders::GPUIQuad>()) as u64,
            });

            ctx.command_encoder.draw_primitives_instanced(
                metal::MTLPrimitiveType::Triangle,
                0,
                6,
                quad_batch.len() as u64,
            );
        }
    }
}

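/// Builds a render pipeline for the named vertex/fragment function pair, with
/// source-over alpha blending enabled on the color attachment.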
fn build_pipeline_state(
    device: &metal::DeviceRef,
    library: &metal::LibraryRef,
    label: &str,
    vertex_fn_name: &str,
    fragment_fn_name: &str,
    pixel_format: metal::MTLPixelFormat,
) -> Result<metal::RenderPipelineState> {
    let vertex_fn = library
        .get_function(vertex_fn_name, None)
        .map_err(|message| anyhow!("error locating vertex function: {}", message))?;
    let fragment_fn = library
        .get_function(fragment_fn_name, None)
        .map_err(|message| anyhow!("error locating fragment function: {}", message))?;

    let descriptor = metal::RenderPipelineDescriptor::new();
    descriptor.set_label(label);
    descriptor.set_vertex_function(Some(vertex_fn.as_ref()));
    descriptor.set_fragment_function(Some(fragment_fn.as_ref()));
    let color_attachment = descriptor.color_attachments().object_at(0).unwrap();
    color_attachment.set_pixel_format(pixel_format);
    color_attachment.set_blending_enabled(true);
    color_attachment.set_rgb_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_alpha_blend_operation(metal::MTLBlendOperation::Add);
    color_attachment.set_source_rgb_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_source_alpha_blend_factor(metal::MTLBlendFactor::SourceAlpha);
    color_attachment.set_destination_rgb_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);
    color_attachment.set_destination_alpha_blend_factor(metal::MTLBlendFactor::OneMinusSourceAlpha);

    device
        .new_render_pipeline_state(&descriptor)
        .map_err(|message| anyhow!("could not create render pipeline state: {}", message))
}

mod shaders {
    #![allow(non_upper_case_globals)]
    #![allow(non_camel_case_types)]
    #![allow(non_snake_case)]

    use crate::{color::ColorU, geometry::vector::Vector2F};

    include!(concat!(env!("OUT_DIR"), "/shaders.rs"));

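    // `vector_float2` and `vector_uchar4` come from the generated bindings and are
    // represented as plain integers, so components are packed with shifts and ors.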
    pub trait ToFloat2 {
        fn to_float2(&self) -> vector_float2;
    }

    pub trait ToUchar4 {
        fn to_uchar4(&self) -> vector_uchar4;
    }

    impl ToFloat2 for (f32, f32) {
        fn to_float2(&self) -> vector_float2 {
            // Place the x bits in the low 32 bits and the y bits in the high 32 bits.
            let mut output = self.1.to_bits() as vector_float2;
            output <<= 32;
            output |= self.0.to_bits() as vector_float2;
            output
        }
    }

    impl ToFloat2 for Vector2F {
        fn to_float2(&self) -> vector_float2 {
            let mut output = self.y().to_bits() as vector_float2;
            output <<= 32;
            output |= self.x().to_bits() as vector_float2;
            output
        }
    }

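    // Pack the color into a `vector_uchar4`, red in the lowest byte and alpha in
    // the highest.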
    impl ToUchar4 for ColorU {
        fn to_uchar4(&self) -> vector_uchar4 {
            let mut vec = self.a as vector_uchar4;
            vec <<= 8;
            vec |= self.b as vector_uchar4;
            vec <<= 8;
            vec |= self.g as vector_uchar4;
            vec <<= 8;
            vec |= self.r as vector_uchar4;
            vec
        }
    }
}