@@ -9,7 +9,7 @@ use gpui::{
Tiling, WindowAppearance, WindowBackgroundAppearance, WindowBounds, WindowControlArea,
WindowDecorations, WindowKind, WindowParams, px,
};
-use gpui_wgpu::{CompositorGpuHint, WgpuContext, WgpuRenderer, WgpuSurfaceConfig};
+use gpui_wgpu::{CompositorGpuHint, WgpuRenderer, WgpuSurfaceConfig};
use collections::FxHashSet;
use raw_window_handle as rwh;
@@ -259,6 +259,8 @@ pub struct X11WindowState {
executor: ForegroundExecutor,
atoms: XcbAtoms,
x_root_window: xproto::Window,
+ x_screen_index: usize,
+ visual_id: u32,
pub(crate) counter_id: sync::Counter,
pub(crate) last_sync_counter: Option<sync::Int64>,
bounds: Bounds<Pixels>,
@@ -391,7 +393,7 @@ impl X11WindowState {
handle: AnyWindowHandle,
client: X11ClientStatePtr,
executor: ForegroundExecutor,
- gpu_context: &mut Option<WgpuContext>,
+ gpu_context: gpui_wgpu::GpuContext,
compositor_gpu: Option<CompositorGpuHint>,
params: WindowParams,
xcb: &Rc<XCBConnection>,
@@ -711,6 +713,8 @@ impl X11WindowState {
executor,
display,
x_root_window: visual_set.root,
+ x_screen_index,
+ visual_id: visual.id,
bounds: bounds.to_pixels(scale_factor),
scale_factor,
renderer,
@@ -803,7 +807,7 @@ impl X11Window {
handle: AnyWindowHandle,
client: X11ClientStatePtr,
executor: ForegroundExecutor,
- gpu_context: &mut Option<WgpuContext>,
+ gpu_context: gpui_wgpu::GpuContext,
compositor_gpu: Option<CompositorGpuHint>,
params: WindowParams,
xcb: &Rc<XCBConnection>,
@@ -1157,13 +1161,11 @@ impl X11WindowStatePtr {
}
pub fn set_bounds(&self, bounds: Bounds<i32>) -> anyhow::Result<()> {
- let mut resize_args = None;
- let is_resize;
- {
+ let (is_resize, content_size, scale_factor) = {
let mut state = self.state.borrow_mut();
let bounds = bounds.map(|f| px(f as f32 / state.scale_factor));
- is_resize = bounds.size.width != state.bounds.size.width
+ let is_resize = bounds.size.width != state.bounds.size.width
|| bounds.size.height != state.bounds.size.height;
// If it's a resize event (only width/height changed), we ignore `bounds.origin`
@@ -1175,22 +1177,19 @@ impl X11WindowStatePtr {
}
let gpu_size = query_render_extent(&self.xcb, self.x_window)?;
- if true {
- state.renderer.update_drawable_size(gpu_size);
- resize_args = Some((state.content_size(), state.scale_factor));
- }
+ state.renderer.update_drawable_size(gpu_size);
+ let result = (is_resize, state.content_size(), state.scale_factor);
if let Some(value) = state.last_sync_counter.take() {
check_reply(
|| "X11 sync SetCounter failed.",
sync::set_counter(&self.xcb, state.counter_id, value),
)?;
}
- }
+ result
+ };
let mut callbacks = self.callbacks.borrow_mut();
- if let Some((content_size, scale_factor)) = resize_args
- && let Some(ref mut fun) = callbacks.resize
- {
+ if let Some(ref mut fun) = callbacks.resize {
fun(content_size, scale_factor)
}
@@ -1483,6 +1482,7 @@ impl PlatformWindow for X11Window {
let state = ref_cell.borrow();
state
.gpu_context
+ .borrow()
.as_ref()
.is_some_and(|ctx| ctx.supports_dual_source_blending())
})
@@ -1577,6 +1577,39 @@ impl PlatformWindow for X11Window {
fn draw(&self, scene: &Scene) {
let mut inner = self.0.state.borrow_mut();
+
+ if inner.renderer.device_lost() {
+ let raw_window = RawWindow {
+ connection: as_raw_xcb_connection::AsRawXcbConnection::as_raw_xcb_connection(
+ &*self.0.xcb,
+ ) as *mut _,
+ screen_id: inner.x_screen_index,
+ window_id: self.0.x_window,
+ visual_id: inner.visual_id,
+ };
+ let display_handle = rwh::HasDisplayHandle::display_handle(&raw_window)
+ .unwrap()
+ .as_raw();
+ let window_handle = rwh::HasWindowHandle::window_handle(&raw_window)
+ .unwrap()
+ .as_raw();
+
+ inner
+ .renderer
+ .recover(display_handle, window_handle)
+ .unwrap_or_else(|err| {
+ panic!(
+ "GPU device lost and recovery failed. \
+ This may happen after system suspend/resume. \
+ Please restart the application.\n\nError: {err}"
+ )
+ });
+
+ // The current scene references atlas textures that were cleared during recovery.
+ // Skip this frame and let the next frame rebuild the scene with fresh textures.
+ return;
+ }
+
inner.renderer.draw(scene);
}
@@ -1,6 +1,4 @@
-#[cfg(not(target_family = "wasm"))]
-use crate::CompositorGpuHint;
-use crate::{WgpuAtlas, WgpuContext};
+use crate::{CompositorGpuHint, WgpuAtlas, WgpuContext};
use bytemuck::{Pod, Zeroable};
use gpui::{
AtlasTextureId, Background, Bounds, DevicePixels, GpuSpecs, MonochromeSprite, Path, Point,
@@ -10,7 +8,9 @@ use gpui::{
use log::warn;
#[cfg(not(target_family = "wasm"))]
use raw_window_handle::{HasDisplayHandle, HasWindowHandle};
+use std::cell::RefCell;
use std::num::NonZeroU64;
+use std::rc::Rc;
use std::sync::{Arc, Mutex};
#[repr(C)]
@@ -93,28 +93,42 @@ struct WgpuBindGroupLayouts {
surfaces: wgpu::BindGroupLayout,
}
-pub struct WgpuRenderer {
+/// Shared GPU context reference, used to coordinate device recovery across multiple windows.
+pub type GpuContext = Rc<RefCell<Option<WgpuContext>>>;
+
+/// GPU resources that must be dropped together during device recovery.
+struct WgpuResources {
device: Arc<wgpu::Device>,
queue: Arc<wgpu::Queue>,
surface: wgpu::Surface<'static>,
- surface_config: wgpu::SurfaceConfiguration,
pipelines: WgpuPipelines,
bind_group_layouts: WgpuBindGroupLayouts,
- atlas: Arc<WgpuAtlas>,
atlas_sampler: wgpu::Sampler,
globals_buffer: wgpu::Buffer,
- path_globals_offset: u64,
- gamma_offset: u64,
globals_bind_group: wgpu::BindGroup,
path_globals_bind_group: wgpu::BindGroup,
instance_buffer: wgpu::Buffer,
- instance_buffer_capacity: u64,
- max_buffer_size: u64,
- storage_buffer_alignment: u64,
path_intermediate_texture: Option<wgpu::Texture>,
path_intermediate_view: Option<wgpu::TextureView>,
path_msaa_texture: Option<wgpu::Texture>,
path_msaa_view: Option<wgpu::TextureView>,
+}
+
+pub struct WgpuRenderer {
+ /// Shared GPU context for device recovery coordination (unused on WASM).
+ #[allow(dead_code)]
+ context: Option<GpuContext>,
+ /// Compositor GPU hint for adapter selection (unused on WASM).
+ #[allow(dead_code)]
+ compositor_gpu: Option<CompositorGpuHint>,
+ resources: Option<WgpuResources>,
+ surface_config: wgpu::SurfaceConfiguration,
+ atlas: Arc<WgpuAtlas>,
+ path_globals_offset: u64,
+ gamma_offset: u64,
+ instance_buffer_capacity: u64,
+ max_buffer_size: u64,
+ storage_buffer_alignment: u64,
rendering_params: RenderingParameters,
dual_source_blending: bool,
adapter_info: wgpu::AdapterInfo,
@@ -123,17 +137,34 @@ pub struct WgpuRenderer {
max_texture_size: u32,
last_error: Arc<Mutex<Option<String>>>,
failed_frame_count: u32,
+ device_lost: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
impl WgpuRenderer {
+ fn resources(&self) -> &WgpuResources {
+ self.resources
+ .as_ref()
+ .expect("GPU resources not available")
+ }
+
+ fn resources_mut(&mut self) -> &mut WgpuResources {
+ self.resources
+ .as_mut()
+ .expect("GPU resources not available")
+ }
+
/// Creates a new WgpuRenderer from raw window handles.
///
+ /// The `gpu_context` is a shared reference that coordinates GPU context across
+ /// multiple windows. The first window to create a renderer will initialize the
+ /// context; subsequent windows will share it.
+ ///
/// # Safety
/// The caller must ensure that the window handle remains valid for the lifetime
/// of the returned renderer.
#[cfg(not(target_family = "wasm"))]
pub fn new<W: HasWindowHandle + HasDisplayHandle>(
- gpu_context: &mut Option<WgpuContext>,
+ gpu_context: GpuContext,
window: &W,
config: WgpuSurfaceConfig,
compositor_gpu: Option<CompositorGpuHint>,
@@ -154,6 +185,7 @@ impl WgpuRenderer {
// The surface must be created with the same instance that will be used for
// adapter selection, otherwise wgpu will panic.
let instance = gpu_context
+ .borrow()
.as_ref()
.map(|ctx| ctx.instance.clone())
.unwrap_or_else(WgpuContext::instance);
@@ -167,15 +199,28 @@ impl WgpuRenderer {
.map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?
};
- let context = match gpu_context {
+ let mut ctx_ref = gpu_context.borrow_mut();
+ let context = match ctx_ref.as_mut() {
Some(context) => {
context.check_compatible_with_surface(&surface)?;
context
}
- None => gpu_context.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
+ None => ctx_ref.insert(WgpuContext::new(instance, &surface, compositor_gpu)?),
};
- Self::new_with_surface(context, surface, config)
+ let atlas = Arc::new(WgpuAtlas::new(
+ Arc::clone(&context.device),
+ Arc::clone(&context.queue),
+ ));
+
+ Self::new_internal(
+ Some(Rc::clone(&gpu_context)),
+ context,
+ surface,
+ config,
+ compositor_gpu,
+ atlas,
+ )
}
#[cfg(target_family = "wasm")]
@@ -188,13 +233,22 @@ impl WgpuRenderer {
.instance
.create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))
.map_err(|e| anyhow::anyhow!("Failed to create surface: {e}"))?;
- Self::new_with_surface(context, surface, config)
+
+ let atlas = Arc::new(WgpuAtlas::new(
+ Arc::clone(&context.device),
+ Arc::clone(&context.queue),
+ ));
+
+ Self::new_internal(None, context, surface, config, None, atlas)
}
- fn new_with_surface(
+ fn new_internal(
+ gpu_context: Option<GpuContext>,
context: &WgpuContext,
surface: wgpu::Surface<'static>,
config: WgpuSurfaceConfig,
+ compositor_gpu: Option<CompositorGpuHint>,
+ atlas: Arc<WgpuAtlas>,
) -> anyhow::Result<Self> {
let surface_caps = surface.get_capabilities(&context.adapter);
let preferred_formats = [
@@ -289,7 +343,6 @@ impl WgpuRenderer {
dual_source_blending,
);
- let atlas = Arc::new(WgpuAtlas::new(Arc::clone(&device), Arc::clone(&queue)));
let atlas_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("atlas_sampler"),
mag_filter: wgpu::FilterMode::Linear,
@@ -375,30 +428,36 @@ impl WgpuRenderer {
*guard = Some(error.to_string());
}));
- Ok(Self {
+ let resources = WgpuResources {
device,
queue,
surface,
- surface_config,
pipelines,
bind_group_layouts,
- atlas,
atlas_sampler,
globals_buffer,
- path_globals_offset,
- gamma_offset,
globals_bind_group,
path_globals_bind_group,
instance_buffer,
- instance_buffer_capacity: initial_instance_buffer_capacity,
- max_buffer_size,
- storage_buffer_alignment,
// Defer intermediate texture creation to first draw call via ensure_intermediate_textures().
// This avoids panics when the device/surface is in an invalid state during initialization.
path_intermediate_texture: None,
path_intermediate_view: None,
path_msaa_texture: None,
path_msaa_view: None,
+ };
+
+ Ok(Self {
+ context: gpu_context,
+ compositor_gpu,
+ resources: Some(resources),
+ surface_config,
+ atlas,
+ path_globals_offset,
+ gamma_offset,
+ instance_buffer_capacity: initial_instance_buffer_capacity,
+ max_buffer_size,
+ storage_buffer_alignment,
rendering_params,
dual_source_blending,
adapter_info,
@@ -407,6 +466,7 @@ impl WgpuRenderer {
max_texture_size,
last_error,
failed_frame_count: 0,
+ device_lost: context.device_lost_flag(),
})
}
@@ -855,8 +915,14 @@ impl WgpuRenderer {
);
}
+ self.surface_config.width = clamped_width.max(1);
+ self.surface_config.height = clamped_height.max(1);
+ let surface_config = self.surface_config.clone();
+
+ let resources = self.resources_mut();
+
// Wait for any in-flight GPU work to complete before destroying textures
- if let Err(e) = self.device.poll(wgpu::PollType::Wait {
+ if let Err(e) = resources.device.poll(wgpu::PollType::Wait {
submission_index: None,
timeout: None,
}) {
@@ -864,55 +930,53 @@ impl WgpuRenderer {
}
// Destroy old textures before allocating new ones to avoid GPU memory spikes
- if let Some(ref texture) = self.path_intermediate_texture {
+ if let Some(ref texture) = resources.path_intermediate_texture {
texture.destroy();
}
- if let Some(ref texture) = self.path_msaa_texture {
+ if let Some(ref texture) = resources.path_msaa_texture {
texture.destroy();
}
- self.surface_config.width = clamped_width.max(1);
- self.surface_config.height = clamped_height.max(1);
- self.surface.configure(&self.device, &self.surface_config);
+ resources
+ .surface
+ .configure(&resources.device, &surface_config);
// Invalidate intermediate textures - they will be lazily recreated
// in draw() after we confirm the surface is healthy. This avoids
// panics when the device/surface is in an invalid state during resize.
- self.path_intermediate_texture = None;
- self.path_intermediate_view = None;
- self.path_msaa_texture = None;
- self.path_msaa_view = None;
+ resources.path_intermediate_texture = None;
+ resources.path_intermediate_view = None;
+ resources.path_msaa_texture = None;
+ resources.path_msaa_view = None;
}
}
fn ensure_intermediate_textures(&mut self) {
- if self.path_intermediate_texture.is_some() {
+ if self.resources().path_intermediate_texture.is_some() {
return;
}
- let (path_intermediate_texture, path_intermediate_view) = {
- let (t, v) = Self::create_path_intermediate(
- &self.device,
- self.surface_config.format,
- self.surface_config.width,
- self.surface_config.height,
- );
- (Some(t), Some(v))
- };
- self.path_intermediate_texture = path_intermediate_texture;
- self.path_intermediate_view = path_intermediate_view;
+ let format = self.surface_config.format;
+ let width = self.surface_config.width;
+ let height = self.surface_config.height;
+ let path_sample_count = self.rendering_params.path_sample_count;
+ let resources = self.resources_mut();
+
+ let (t, v) = Self::create_path_intermediate(&resources.device, format, width, height);
+ resources.path_intermediate_texture = Some(t);
+ resources.path_intermediate_view = Some(v);
let (path_msaa_texture, path_msaa_view) = Self::create_msaa_if_needed(
- &self.device,
- self.surface_config.format,
- self.surface_config.width,
- self.surface_config.height,
- self.rendering_params.path_sample_count,
+ &resources.device,
+ format,
+ width,
+ height,
+ path_sample_count,
)
.map(|(t, v)| (Some(t), Some(v)))
.unwrap_or((None, None));
- self.path_msaa_texture = path_msaa_texture;
- self.path_msaa_view = path_msaa_view;
+ resources.path_msaa_texture = path_msaa_texture;
+ resources.path_msaa_view = path_msaa_view;
}
pub fn update_transparency(&mut self, transparent: bool) {
@@ -924,14 +988,20 @@ impl WgpuRenderer {
if new_alpha_mode != self.surface_config.alpha_mode {
self.surface_config.alpha_mode = new_alpha_mode;
- self.surface.configure(&self.device, &self.surface_config);
- self.pipelines = Self::create_pipelines(
- &self.device,
- &self.bind_group_layouts,
- self.surface_config.format,
- self.surface_config.alpha_mode,
- self.rendering_params.path_sample_count,
- self.dual_source_blending,
+ let surface_config = self.surface_config.clone();
+ let path_sample_count = self.rendering_params.path_sample_count;
+ let dual_source_blending = self.dual_source_blending;
+ let resources = self.resources_mut();
+ resources
+ .surface
+ .configure(&resources.device, &surface_config);
+ resources.pipelines = Self::create_pipelines(
+ &resources.device,
+ &resources.bind_group_layouts,
+ surface_config.format,
+ surface_config.alpha_mode,
+ path_sample_count,
+ dual_source_blending,
);
}
}
@@ -982,14 +1052,20 @@ impl WgpuRenderer {
self.atlas.before_frame();
- let frame = match self.surface.get_current_texture() {
+ let texture_result = self.resources().surface.get_current_texture();
+ let frame = match texture_result {
Ok(frame) => frame,
Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
- self.surface.configure(&self.device, &self.surface_config);
+ let surface_config = self.surface_config.clone();
+ let resources = self.resources_mut();
+ resources
+ .surface
+ .configure(&resources.device, &surface_config);
return;
}
Err(e) => {
- log::error!("Failed to acquire surface texture: {e}");
+ *self.last_error.lock().unwrap() =
+ Some(format!("Failed to acquire surface texture: {e}"));
return;
}
};
@@ -1028,28 +1104,35 @@ impl WgpuRenderer {
..globals
};
- self.queue
- .write_buffer(&self.globals_buffer, 0, bytemuck::bytes_of(&globals));
- self.queue.write_buffer(
- &self.globals_buffer,
- self.path_globals_offset,
- bytemuck::bytes_of(&path_globals),
- );
- self.queue.write_buffer(
- &self.globals_buffer,
- self.gamma_offset,
- bytemuck::bytes_of(&gamma_params),
- );
+ {
+ let resources = self.resources();
+ resources.queue.write_buffer(
+ &resources.globals_buffer,
+ 0,
+ bytemuck::bytes_of(&globals),
+ );
+ resources.queue.write_buffer(
+ &resources.globals_buffer,
+ self.path_globals_offset,
+ bytemuck::bytes_of(&path_globals),
+ );
+ resources.queue.write_buffer(
+ &resources.globals_buffer,
+ self.gamma_offset,
+ bytemuck::bytes_of(&gamma_params),
+ );
+ }
loop {
let mut instance_offset: u64 = 0;
let mut overflow = false;
- let mut encoder = self
- .device
- .create_command_encoder(&wgpu::CommandEncoderDescriptor {
- label: Some("main_encoder"),
- });
+ let mut encoder =
+ self.resources()
+ .device
+ .create_command_encoder(&wgpu::CommandEncoderDescriptor {
+ label: Some("main_encoder"),
+ });
{
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
@@ -1169,7 +1252,9 @@ impl WgpuRenderer {
continue;
}
- self.queue.submit(std::iter::once(encoder.finish()));
+ self.resources()
+ .queue
+ .submit(std::iter::once(encoder.finish()));
frame.present();
return;
}
@@ -1185,7 +1270,7 @@ impl WgpuRenderer {
self.draw_instances(
data,
quads.len() as u32,
- &self.pipelines.quads,
+ &self.resources().pipelines.quads,
instance_offset,
pass,
)
@@ -1201,7 +1286,7 @@ impl WgpuRenderer {
self.draw_instances(
data,
shadows.len() as u32,
- &self.pipelines.shadows,
+ &self.resources().pipelines.shadows,
instance_offset,
pass,
)
@@ -1217,7 +1302,7 @@ impl WgpuRenderer {
self.draw_instances(
data,
underlines.len() as u32,
- &self.pipelines.underlines,
+ &self.resources().pipelines.underlines,
instance_offset,
pass,
)
@@ -1236,7 +1321,7 @@ impl WgpuRenderer {
data,
sprites.len() as u32,
&tex_info.view,
- &self.pipelines.mono_sprites,
+ &self.resources().pipelines.mono_sprites,
instance_offset,
pass,
)
@@ -1251,11 +1336,12 @@ impl WgpuRenderer {
) -> bool {
let tex_info = self.atlas.get_texture_info(texture_id);
let data = unsafe { Self::instance_bytes(sprites) };
- let pipeline = self
+ let resources = self.resources();
+ let pipeline = resources
.pipelines
.subpixel_sprites
.as_ref()
- .unwrap_or(&self.pipelines.mono_sprites);
+ .unwrap_or(&resources.pipelines.mono_sprites);
self.draw_instances_with_texture(
data,
sprites.len() as u32,
@@ -1279,7 +1365,7 @@ impl WgpuRenderer {
data,
sprites.len() as u32,
&tex_info.view,
- &self.pipelines.poly_sprites,
+ &self.resources().pipelines.poly_sprites,
instance_offset,
pass,
)
@@ -1299,16 +1385,19 @@ impl WgpuRenderer {
let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
return false;
};
- let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
- label: None,
- layout: &self.bind_group_layouts.instances,
- entries: &[wgpu::BindGroupEntry {
- binding: 0,
- resource: self.instance_binding(offset, size),
- }],
- });
+ let resources = self.resources();
+ let bind_group = resources
+ .device
+ .create_bind_group(&wgpu::BindGroupDescriptor {
+ label: None,
+ layout: &resources.bind_group_layouts.instances,
+ entries: &[wgpu::BindGroupEntry {
+ binding: 0,
+ resource: self.instance_binding(offset, size),
+ }],
+ });
pass.set_pipeline(pipeline);
- pass.set_bind_group(0, &self.globals_bind_group, &[]);
+ pass.set_bind_group(0, &resources.globals_bind_group, &[]);
pass.set_bind_group(1, &bind_group, &[]);
pass.draw(0..4, 0..instance_count);
true
@@ -1329,26 +1418,29 @@ impl WgpuRenderer {
let Some((offset, size)) = self.write_to_instance_buffer(instance_offset, data) else {
return false;
};
- let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
- label: None,
- layout: &self.bind_group_layouts.instances_with_texture,
- entries: &[
- wgpu::BindGroupEntry {
- binding: 0,
- resource: self.instance_binding(offset, size),
- },
- wgpu::BindGroupEntry {
- binding: 1,
- resource: wgpu::BindingResource::TextureView(texture_view),
- },
- wgpu::BindGroupEntry {
- binding: 2,
- resource: wgpu::BindingResource::Sampler(&self.atlas_sampler),
- },
- ],
- });
+ let resources = self.resources();
+ let bind_group = resources
+ .device
+ .create_bind_group(&wgpu::BindGroupDescriptor {
+ label: None,
+ layout: &resources.bind_group_layouts.instances_with_texture,
+ entries: &[
+ wgpu::BindGroupEntry {
+ binding: 0,
+ resource: self.instance_binding(offset, size),
+ },
+ wgpu::BindGroupEntry {
+ binding: 1,
+ resource: wgpu::BindingResource::TextureView(texture_view),
+ },
+ wgpu::BindGroupEntry {
+ binding: 2,
+ resource: wgpu::BindingResource::Sampler(&resources.atlas_sampler),
+ },
+ ],
+ });
pass.set_pipeline(pipeline);
- pass.set_bind_group(0, &self.globals_bind_group, &[]);
+ pass.set_bind_group(0, &resources.globals_bind_group, &[]);
pass.set_bind_group(1, &bind_group, &[]);
pass.draw(0..4, 0..instance_count);
true
@@ -1386,7 +1478,8 @@ impl WgpuRenderer {
vec![PathSprite { bounds }]
};
- let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
+ let resources = self.resources();
+ let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
return true;
};
@@ -1395,7 +1488,7 @@ impl WgpuRenderer {
sprite_data,
sprites.len() as u32,
path_intermediate_view,
- &self.pipelines.paths,
+ &resources.pipelines.paths,
instance_offset,
pass,
)
@@ -1429,20 +1522,23 @@ impl WgpuRenderer {
return false;
};
- let data_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
- label: Some("path_rasterization_bind_group"),
- layout: &self.bind_group_layouts.instances,
- entries: &[wgpu::BindGroupEntry {
- binding: 0,
- resource: self.instance_binding(vertex_offset, vertex_size),
- }],
- });
+ let resources = self.resources();
+ let data_bind_group = resources
+ .device
+ .create_bind_group(&wgpu::BindGroupDescriptor {
+ label: Some("path_rasterization_bind_group"),
+ layout: &resources.bind_group_layouts.instances,
+ entries: &[wgpu::BindGroupEntry {
+ binding: 0,
+ resource: self.instance_binding(vertex_offset, vertex_size),
+ }],
+ });
- let Some(path_intermediate_view) = self.path_intermediate_view.as_ref() else {
+ let Some(path_intermediate_view) = resources.path_intermediate_view.as_ref() else {
return true;
};
- let (target_view, resolve_target) = if let Some(ref msaa_view) = self.path_msaa_view {
+ let (target_view, resolve_target) = if let Some(ref msaa_view) = resources.path_msaa_view {
(msaa_view, Some(path_intermediate_view))
} else {
(path_intermediate_view, None)
@@ -1464,8 +1560,8 @@ impl WgpuRenderer {
..Default::default()
});
- pass.set_pipeline(&self.pipelines.path_rasterization);
- pass.set_bind_group(0, &self.path_globals_bind_group, &[]);
+ pass.set_pipeline(&resources.pipelines.path_rasterization);
+ pass.set_bind_group(0, &resources.path_globals_bind_group, &[]);
pass.set_bind_group(1, &data_bind_group, &[]);
pass.draw(0..vertices.len() as u32, 0..1);
}
@@ -1476,7 +1572,8 @@ impl WgpuRenderer {
fn grow_instance_buffer(&mut self) {
let new_capacity = (self.instance_buffer_capacity * 2).min(self.max_buffer_size);
log::info!("increased instance buffer size to {}", new_capacity);
- self.instance_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
+ let resources = self.resources_mut();
+ resources.instance_buffer = resources.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("instance_buffer"),
size: new_capacity,
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
@@ -1495,14 +1592,17 @@ impl WgpuRenderer {
if offset + size > self.instance_buffer_capacity {
return None;
}
- self.queue.write_buffer(&self.instance_buffer, offset, data);
+ let resources = self.resources();
+ resources
+ .queue
+ .write_buffer(&resources.instance_buffer, offset, data);
*instance_offset = offset + size;
Some((offset, NonZeroU64::new(size).expect("size is at least 16")))
}
fn instance_binding(&self, offset: u64, size: NonZeroU64) -> wgpu::BindingResource<'_> {
wgpu::BindingResource::Buffer(wgpu::BufferBinding {
- buffer: &self.instance_buffer,
+ buffer: &self.resources().instance_buffer,
offset,
size: Some(size),
})
@@ -1511,6 +1611,97 @@ impl WgpuRenderer {
pub fn destroy(&mut self) {
// wgpu resources are automatically cleaned up when dropped
}
+
+ /// Returns true if the GPU device was lost and recovery is needed.
+ pub fn device_lost(&self) -> bool {
+ self.device_lost.load(std::sync::atomic::Ordering::SeqCst)
+ }
+
+ /// Recovers from a lost GPU device by recreating the renderer with a new context.
+ ///
+ /// Call this after detecting `device_lost()` returns true.
+ ///
+ /// This method coordinates recovery across multiple windows:
+ /// - The first window to call this will recreate the shared context
+ /// - Subsequent windows will adopt the already-recovered context
+ #[cfg(not(target_family = "wasm"))]
+ pub fn recover(
+ &mut self,
+ raw_display_handle: raw_window_handle::RawDisplayHandle,
+ raw_window_handle: raw_window_handle::RawWindowHandle,
+ ) -> anyhow::Result<()> {
+ let gpu_context = self.context.as_ref().expect("recover requires gpu_context");
+
+ // Check if another window already recovered the context
+ let needs_new_context = gpu_context
+ .borrow()
+ .as_ref()
+ .is_none_or(|ctx| ctx.device_lost());
+
+ let surface = if needs_new_context {
+ log::warn!("GPU device lost, recreating context...");
+
+ // Drop old resources to release Arc<Device>/Arc<Queue> and GPU resources
+ self.resources = None;
+ *gpu_context.borrow_mut() = None;
+
+        // Give the GPU driver time to settle after device loss before reinitializing
+ std::thread::sleep(std::time::Duration::from_millis(350));
+
+ let instance = WgpuContext::instance();
+ let surface = create_surface(&instance, raw_display_handle, raw_window_handle)?;
+ let new_context = WgpuContext::new(instance, &surface, self.compositor_gpu)?;
+ *gpu_context.borrow_mut() = Some(new_context);
+ surface
+ } else {
+ let ctx_ref = gpu_context.borrow();
+ let instance = &ctx_ref.as_ref().unwrap().instance;
+ create_surface(instance, raw_display_handle, raw_window_handle)?
+ };
+
+ let config = WgpuSurfaceConfig {
+ size: gpui::Size {
+ width: gpui::DevicePixels(self.surface_config.width as i32),
+ height: gpui::DevicePixels(self.surface_config.height as i32),
+ },
+ transparent: self.surface_config.alpha_mode != wgpu::CompositeAlphaMode::Opaque,
+ };
+ let gpu_context = Rc::clone(gpu_context);
+ let ctx_ref = gpu_context.borrow();
+ let context = ctx_ref.as_ref().expect("context should exist");
+
+ self.resources = None;
+ self.atlas
+ .handle_device_lost(Arc::clone(&context.device), Arc::clone(&context.queue));
+
+ *self = Self::new_internal(
+ Some(gpu_context.clone()),
+ context,
+ surface,
+ config,
+ self.compositor_gpu,
+ self.atlas.clone(),
+ )?;
+
+ log::info!("GPU recovery complete");
+ Ok(())
+ }
+}
+
+#[cfg(not(target_family = "wasm"))]
+fn create_surface(
+ instance: &wgpu::Instance,
+ raw_display_handle: raw_window_handle::RawDisplayHandle,
+ raw_window_handle: raw_window_handle::RawWindowHandle,
+) -> anyhow::Result<wgpu::Surface<'static>> {
+ unsafe {
+ instance
+ .create_surface_unsafe(wgpu::SurfaceTargetUnsafe::RawHandle {
+ raw_display_handle,
+ raw_window_handle,
+ })
+ .map_err(|e| anyhow::anyhow!("{e}"))
+ }
}
struct RenderingParameters {