Add option to initialize on existing wgpu setup (#5319)

When mixing and matching eframe with other wgpu applications
(https://github.com/tracel-ai/burn in my case), it can be helpful to
initialize eframe with an existing wgpu setup. This PR changes
`WgpuConfiguration` (in a non-backwards-compatible way :/) so that it takes
either options for how to create a wgpu setup, or an existing wgpu setup
(consisting of an instance, adapter, device and queue).
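
Below is a rough sketch of how an existing setup could be passed in. It is illustrative only: the `WgpuSetup::Existing` fields match this PR, but the surrounding `eframe::NativeOptions` boilerplate and the pre-existing `instance`/`adapter`/`device`/`queue` handles are assumed.

```rust
// Hypothetical example: reuse wgpu objects created elsewhere (e.g. by burn).
// `instance`, `adapter`, `device` and `queue` are assumed to already exist
// as `Arc`-wrapped wgpu handles shared with the host application.
let native_options = eframe::NativeOptions {
    wgpu_options: egui_wgpu::WgpuConfiguration {
        wgpu_setup: egui_wgpu::WgpuSetup::Existing {
            instance,
            adapter,
            device,
            queue,
        },
        // Keep the remaining defaults (present mode, surface error handling, ...).
        ..Default::default()
    },
    ..Default::default()
};
```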

* [x] I have followed the instructions in the PR template

---------

Co-authored-by: Andreas Reich <r_andreas2@web.de>
Arthur Brussee 2024-10-29 16:12:28 +00:00 committed by GitHub
parent fba2dc85a3
commit 759a0b2a21
3 changed files with 269 additions and 190 deletions

View File

@ -1,15 +1,13 @@
use std::sync::Arc;
use raw_window_handle::{
DisplayHandle, HandleError, HasDisplayHandle, HasWindowHandle, RawDisplayHandle,
RawWindowHandle, WebDisplayHandle, WebWindowHandle, WindowHandle,
};
use wasm_bindgen::JsValue;
use web_sys::HtmlCanvasElement;
use crate::WebOptions;
use egui_wgpu::{RenderState, SurfaceErrorAction, WgpuSetup};
use super::web_painter::WebPainter;
@ -89,81 +87,97 @@ impl WebPainterWgpu {
) -> Result<Self, String> {
log::debug!("Creating wgpu painter");
let instance = match &options.wgpu_options.wgpu_setup {
WgpuSetup::CreateNew {
supported_backends: backends,
power_preference,
..
} => {
let mut backends = *backends;
// Don't try WebGPU if we're not in a secure context.
if backends.contains(wgpu::Backends::BROWSER_WEBGPU) {
let is_secure_context =
web_sys::window().map_or(false, |w| w.is_secure_context());
if !is_secure_context {
log::info!(
"WebGPU is only available in secure contexts, i.e. on HTTPS and on localhost."
);
// Don't try WebGPU since we established now that it will fail.
backends.remove(wgpu::Backends::BROWSER_WEBGPU);
if backends.is_empty() {
return Err("No available supported graphics backends.".to_owned());
}
}
}
log::debug!("Creating wgpu instance with backends {:?}", backends);
let mut instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
..Default::default()
});
// It can happen that a browser advertises WebGPU support, but then fails to create a
// suitable adapter. As of writing this happens for example on Linux with Chrome 121.
//
// Since WebGPU is handled in a special way in wgpu, we have to recreate the instance
// if we instead want to try with WebGL.
//
// To make matters worse, once a canvas has been used with either WebGL or WebGPU,
// we can't go back and change that without replacing the canvas (which is hard to do from here).
// Therefore, we have to create the surface *after* requesting the adapter.
// However, wgpu offers to pass in a surface on adapter creation to ensure it is actually compatible with the chosen backend.
// This in turn isn't all that important on the web, but it still makes sense for the design of
// `egui::RenderState`!
// Therefore, we have to first check if it's possible to create a WebGPU adapter,
// and if it is not, start over with a WebGL instance.
//
// Note that we also might needlessly try this here if wgpu already determined that there's no
// WebGPU support in the first place. This is not a huge problem since it fails very fast, but
// it would be nice to avoid this. See https://github.com/gfx-rs/wgpu/issues/5142
if backends.contains(wgpu::Backends::BROWSER_WEBGPU) {
log::debug!("Attempting to create WebGPU adapter to check for support.");
if let Some(adapter) = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: *power_preference,
compatible_surface: None,
force_fallback_adapter: false,
})
.await
{
// WebGPU doesn't spec yet a destroy on the adapter, only on the device.
//adapter.destroy();
log::debug!(
"Successfully created WebGPU adapter, WebGPU confirmed to be supported!"
);
} else {
log::debug!("Failed to create WebGPU adapter.");
if backends.contains(wgpu::Backends::GL) {
log::debug!("Recreating wgpu instance with WebGL backend only.");
backends = wgpu::Backends::GL;
instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
..Default::default()
});
} else {
return Err(
"Failed to create WebGPU adapter and WebGL was not enabled."
.to_owned(),
);
}
}
}
// On wasm, depending on feature flags, wgpu objects may or may not implement sync.
// It doesn't make sense to switch to Rc for that special usecase, so simply disable the lint.
#[allow(clippy::arc_with_non_send_sync)]
Arc::new(instance)
}
WgpuSetup::Existing { instance, .. } => instance.clone(),
};
let surface = instance
.create_surface(wgpu::SurfaceTarget::Canvas(canvas.clone()))

View File

@ -8,8 +8,8 @@
//! wgpu = { version = "*", features = ["webgpu", "webgl"] }
//! ```
//!
//! You can control whether WebGL or WebGPU will be picked at runtime by configuring
//! [`WgpuConfiguration::wgpu_setup`].
//! The default is to prefer WebGPU and fall back on WebGL.
//!
//! ## Feature flags
@ -24,6 +24,7 @@ pub use wgpu;
mod renderer;
pub use renderer::*;
use wgpu::{Adapter, Device, Instance, Queue};
/// Module for painting [`egui`](https://github.com/emilk/egui) with [`wgpu`] on [`winit`].
#[cfg(feature = "winit")]
@ -98,73 +99,92 @@ impl RenderState {
#[cfg(not(target_arch = "wasm32"))]
let available_adapters = instance.enumerate_adapters(wgpu::Backends::all());
let (adapter, device, queue) = match config.wgpu_setup.clone() {
WgpuSetup::CreateNew {
supported_backends: _,
power_preference,
device_descriptor,
} => {
let adapter = {
crate::profile_scope!("request_adapter");
instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference,
compatible_surface: Some(surface),
force_fallback_adapter: false,
})
.await
.ok_or_else(|| {
#[cfg(not(target_arch = "wasm32"))]
if available_adapters.is_empty() {
log::info!("No wgpu adapters found");
} else if available_adapters.len() == 1 {
log::info!(
"The only available wgpu adapter was not suitable: {}",
adapter_info_summary(&available_adapters[0].get_info())
);
} else {
log::info!(
"No suitable wgpu adapter found out of the {} available ones: {}",
available_adapters.len(),
describe_adapters(&available_adapters)
);
}
WgpuError::NoSuitableAdapterFound
})?
};
#[cfg(target_arch = "wasm32")]
log::debug!(
"Picked wgpu adapter: {}",
adapter_info_summary(&adapter.get_info())
);
#[cfg(not(target_arch = "wasm32"))]
if available_adapters.len() == 1 {
log::debug!(
"Picked the only available wgpu adapter: {}",
adapter_info_summary(&adapter.get_info())
);
} else {
log::info!(
"There were {} available wgpu adapters: {}",
available_adapters.len(),
describe_adapters(&available_adapters)
);
log::debug!(
"Picked wgpu adapter: {}",
adapter_info_summary(&adapter.get_info())
);
}
let (device, queue) = {
crate::profile_scope!("request_device");
adapter
.request_device(&(*device_descriptor)(&adapter), None)
.await?
};
// On wasm, depending on feature flags, wgpu objects may or may not implement sync.
// It doesn't make sense to switch to Rc for that special usecase, so simply disable the lint.
#[allow(clippy::arc_with_non_send_sync)]
(Arc::new(adapter), Arc::new(device), Arc::new(queue))
}
WgpuSetup::Existing {
instance: _,
adapter,
device,
queue,
} => (adapter, device, queue),
};
let capabilities = {
crate::profile_scope!("get_capabilities");
surface.get_capabilities(&adapter).formats
};
let target_format = crate::preferred_framebuffer_format(&capabilities)?;
let renderer = Renderer::new(
&device,
target_format,
@ -177,11 +197,11 @@ impl RenderState {
// It doesn't make sense to switch to Rc for that special usecase, so simply disable the lint.
#[allow(clippy::arc_with_non_send_sync)]
Ok(Self {
adapter,
#[cfg(not(target_arch = "wasm32"))]
available_adapters: available_adapters.into(),
device,
queue,
target_format,
renderer: Arc::new(RwLock::new(renderer)),
})
@ -215,27 +235,65 @@ pub enum SurfaceErrorAction {
RecreateSurface,
}
#[derive(Clone)]
pub enum WgpuSetup {
/// Construct a wgpu setup using some predefined settings & heuristics.
/// This is the default option. You can customize most behaviours by overriding the
/// supported backends, power preference, and device descriptor.
///
/// This can also be configured with the environment variables:
/// * `WGPU_BACKEND`: `vulkan`, `dx11`, `dx12`, `metal`, `opengl`, `webgpu`
/// * `WGPU_POWER_PREF`: `low`, `high` or `none`
CreateNew {
/// Backends that should be supported (wgpu will pick one of these).
///
/// For instance, if you only want to support WebGL (and not WebGPU),
/// you can set this to [`wgpu::Backends::GL`].
///
/// By default on web, WebGPU will be used if available.
/// WebGL will only be used as a fallback,
/// and only if you have enabled the `webgl` feature of crate `wgpu`.
supported_backends: wgpu::Backends,
/// Power preference for the adapter.
power_preference: wgpu::PowerPreference,
/// Configuration passed on device request, given an adapter
device_descriptor:
Arc<dyn Fn(&wgpu::Adapter) -> wgpu::DeviceDescriptor<'static> + Send + Sync>,
},
/// Run on an existing wgpu setup.
Existing {
instance: Arc<Instance>,
adapter: Arc<Adapter>,
device: Arc<Device>,
queue: Arc<Queue>,
},
}
impl std::fmt::Debug for WgpuSetup {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::CreateNew {
supported_backends,
power_preference,
device_descriptor: _,
} => f
.debug_struct("AdapterSelection::Standard")
.field("supported_backends", &supported_backends)
.field("power_preference", &power_preference)
.finish(),
Self::Existing { .. } => f
.debug_struct("AdapterSelection::Existing")
.finish_non_exhaustive(),
}
}
}
/// Configuration for using wgpu with eframe or the egui-wgpu winit feature.
///
/// This can also be configured with the environment variables:
/// * `WGPU_BACKEND`: `vulkan`, `dx11`, `dx12`, `metal`, `opengl`, `webgpu`
/// * `WGPU_POWER_PREF`: `low`, `high` or `none`
#[derive(Clone)]
pub struct WgpuConfiguration {
/// Present mode used for the primary surface.
pub present_mode: wgpu::PresentMode,
@ -248,8 +306,8 @@ pub struct WgpuConfiguration {
/// `None` = `wgpu` default.
pub desired_maximum_frame_latency: Option<u32>,
/// How to create the wgpu adapter & device
pub wgpu_setup: WgpuSetup,
/// Callback for surface errors.
pub on_surface_error: Arc<dyn Fn(wgpu::SurfaceError) -> SurfaceErrorAction + Send + Sync>,
@ -264,21 +322,18 @@ fn wgpu_config_impl_send_sync() {
impl std::fmt::Debug for WgpuConfiguration {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let Self {
present_mode,
desired_maximum_frame_latency,
wgpu_setup,
on_surface_error: _,
} = self;
f.debug_struct("WgpuConfiguration")
.field("supported_backends", &supported_backends)
.field("present_mode", &present_mode)
.field(
"desired_maximum_frame_latency",
&desired_maximum_frame_latency,
)
.field("power_preference", &power_preference)
.field("wgpu_setup", &wgpu_setup)
.finish_non_exhaustive()
}
}
@ -286,37 +341,42 @@ impl std::fmt::Debug for WgpuConfiguration {
impl Default for WgpuConfiguration {
fn default() -> Self {
Self {
present_mode: wgpu::PresentMode::AutoVsync,
desired_maximum_frame_latency: None,
// By default, create a new wgpu setup. This will create a new instance, adapter, device and queue.
// This will create an instance for the supported backends (which can be configured by
// `WGPU_BACKEND`), and will pick an adapter by iterating adapters based on their power preference. The power
// preference can also be configured by `WGPU_POWER_PREF`.
wgpu_setup: WgpuSetup::CreateNew {
// Add GL backend, primarily because WebGPU is not stable enough yet.
// (note however, that the GL backend needs to be opted-in via the wgpu feature flag "webgl")
supported_backends: wgpu::util::backend_bits_from_env()
.unwrap_or(wgpu::Backends::PRIMARY | wgpu::Backends::GL),
power_preference: wgpu::util::power_preference_from_env()
.unwrap_or(wgpu::PowerPreference::HighPerformance),
device_descriptor: Arc::new(|adapter| {
let base_limits = if adapter.get_info().backend == wgpu::Backend::Gl {
wgpu::Limits::downlevel_webgl2_defaults()
} else {
wgpu::Limits::default()
};
wgpu::DeviceDescriptor {
label: Some("egui wgpu device"),
required_features: wgpu::Features::default(),
required_limits: wgpu::Limits {
// When using a depth buffer, we have to be able to create a texture
// large enough for the entire surface, and we want to support 4k+ displays.
max_texture_dimension_2d: 8192,
..base_limits
},
memory_hints: wgpu::MemoryHints::default(),
}
}),
},
on_surface_error: Arc::new(|err| {
if err == wgpu::SurfaceError::Outdated {

View File

@ -87,7 +87,7 @@ pub struct Painter {
depth_format: Option<wgpu::TextureFormat>,
screen_capture_state: Option<CaptureState>,
instance: Arc<wgpu::Instance>,
render_state: Option<RenderState>,
// Per viewport/window:
@ -116,10 +116,15 @@ impl Painter {
support_transparent_backbuffer: bool,
dithering: bool,
) -> Self {
let instance = match &configuration.wgpu_setup {
crate::WgpuSetup::CreateNew {
supported_backends, ..
} => Arc::new(wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: *supported_backends,
..Default::default()
})),
crate::WgpuSetup::Existing { instance, .. } => instance.clone(),
};
Self {
configuration,