Composite layers in HDR color space

Skyler Lehmkuhl 2025-12-08 04:20:48 -05:00
parent 2caea564ac
commit 420f3bf7b9
9 changed files with 1512 additions and 25 deletions

View File

@@ -10,6 +10,10 @@ serde_json = { workspace = true }
# UI framework (for Color32 conversion)
egui = "0.31"
# GPU rendering infrastructure
wgpu = { workspace = true }
bytemuck = { version = "1.14", features = ["derive"] }
# Geometry and rendering
kurbo = { workspace = true }
vello = { workspace = true }

View File

@@ -0,0 +1,263 @@
// Buffer pool for efficient render target management
//
// Provides acquire/release semantics for GPU textures used in the compositing pipeline.
// Buffers are reused when possible to minimize allocation overhead.
use wgpu;
/// Handle to a pooled render buffer
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct BufferHandle(pub(crate) u32);
impl BufferHandle {
/// Returns the raw handle ID (for debugging)
pub fn id(&self) -> u32 {
self.0
}
}
/// Texture format for render buffers
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum BufferFormat {
/// 8-bit (Vello output format - needs STORAGE_BINDING)
/// Note: Maps to Rgba8Unorm instead of Rgba8UnormSrgb because sRGB formats
/// don't support storage binding; the texels still hold sRGB-encoded values
Rgba8Srgb,
/// 16-bit float HDR (internal processing format)
Rgba16Float,
}
impl BufferFormat {
/// Convert to wgpu texture format
pub fn to_wgpu(&self) -> wgpu::TextureFormat {
match self {
// Use Rgba8Unorm for Vello compatibility (STORAGE_BINDING required)
// Vello handles color space conversion internally
BufferFormat::Rgba8Srgb => wgpu::TextureFormat::Rgba8Unorm,
BufferFormat::Rgba16Float => wgpu::TextureFormat::Rgba16Float,
}
}
}
/// Specification for a render buffer
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct BufferSpec {
pub width: u32,
pub height: u32,
pub format: BufferFormat,
}
impl BufferSpec {
pub fn new(width: u32, height: u32, format: BufferFormat) -> Self {
Self { width, height, format }
}
pub fn hdr(width: u32, height: u32) -> Self {
Self::new(width, height, BufferFormat::Rgba16Float)
}
}
/// Internal pooled buffer storage
struct PooledBuffer {
handle: BufferHandle,
texture: wgpu::Texture,
view: wgpu::TextureView,
spec: BufferSpec,
in_use: bool,
/// Frame counter when last used (for cleanup)
last_used_frame: u64,
}
/// Buffer pool for render target management
///
/// Provides efficient allocation and reuse of GPU textures for the compositing pipeline.
/// Buffers are acquired for rendering and released when no longer needed.
pub struct BufferPool {
buffers: Vec<PooledBuffer>,
next_id: u32,
current_frame: u64,
/// Maximum number of unused frames before a buffer is eligible for cleanup
max_unused_frames: u64,
}
impl BufferPool {
/// Create a new empty buffer pool
pub fn new() -> Self {
Self {
buffers: Vec::new(),
next_id: 0,
current_frame: 0,
max_unused_frames: 60, // ~1 second at 60fps
}
}
/// Acquire a buffer matching the given specification
///
/// Returns a handle to a buffer that can be used for rendering.
/// The buffer may be newly created or reused from the pool.
pub fn acquire(&mut self, device: &wgpu::Device, spec: BufferSpec) -> BufferHandle {
// First, try to find a free buffer with matching spec
for buffer in &mut self.buffers {
if !buffer.in_use && buffer.spec == spec {
buffer.in_use = true;
buffer.last_used_frame = self.current_frame;
return buffer.handle;
}
}
// No matching buffer found, create a new one
let handle = BufferHandle(self.next_id);
self.next_id += 1;
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some(&format!("pool_buffer_{}", handle.0)),
size: wgpu::Extent3d {
width: spec.width,
height: spec.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: spec.format.to_wgpu(),
usage: wgpu::TextureUsages::RENDER_ATTACHMENT
| wgpu::TextureUsages::TEXTURE_BINDING
| wgpu::TextureUsages::STORAGE_BINDING
| wgpu::TextureUsages::COPY_SRC
| wgpu::TextureUsages::COPY_DST,
view_formats: &[],
});
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
self.buffers.push(PooledBuffer {
handle,
texture,
view,
spec,
in_use: true,
last_used_frame: self.current_frame,
});
handle
}
/// Release a buffer back to the pool
///
/// The buffer becomes available for reuse by future acquire calls.
pub fn release(&mut self, handle: BufferHandle) {
if let Some(buffer) = self.buffers.iter_mut().find(|b| b.handle == handle) {
buffer.in_use = false;
}
}
/// Get the texture view for a buffer handle
pub fn get_view(&self, handle: BufferHandle) -> Option<&wgpu::TextureView> {
self.buffers
.iter()
.find(|b| b.handle == handle)
.map(|b| &b.view)
}
/// Get the texture for a buffer handle
pub fn get_texture(&self, handle: BufferHandle) -> Option<&wgpu::Texture> {
self.buffers
.iter()
.find(|b| b.handle == handle)
.map(|b| &b.texture)
}
/// Get the spec for a buffer handle
pub fn get_spec(&self, handle: BufferHandle) -> Option<BufferSpec> {
self.buffers
.iter()
.find(|b| b.handle == handle)
.map(|b| b.spec)
}
/// Check if a buffer is currently in use
pub fn is_in_use(&self, handle: BufferHandle) -> bool {
self.buffers
.iter()
.find(|b| b.handle == handle)
.map(|b| b.in_use)
.unwrap_or(false)
}
/// Advance to the next frame
///
/// Call this once per frame to track buffer usage over time.
pub fn next_frame(&mut self) {
self.current_frame += 1;
}
/// Clear buffers that haven't been used for a while
///
/// Removes buffers that are not in use and haven't been used for
/// more than `max_unused_frames` frames.
pub fn clear_unused(&mut self) {
let current = self.current_frame;
let max_unused = self.max_unused_frames;
self.buffers.retain(|b| {
b.in_use || (current - b.last_used_frame) < max_unused
});
}
/// Force clear all unused buffers immediately
pub fn clear_all_unused(&mut self) {
self.buffers.retain(|b| b.in_use);
}
/// Get statistics about the pool
pub fn stats(&self) -> BufferPoolStats {
let total = self.buffers.len();
let in_use = self.buffers.iter().filter(|b| b.in_use).count();
let total_bytes: u64 = self.buffers.iter().map(|b| {
let bytes_per_pixel = match b.spec.format {
BufferFormat::Rgba8Srgb => 4,
BufferFormat::Rgba16Float => 8,
};
(b.spec.width as u64) * (b.spec.height as u64) * bytes_per_pixel
}).sum();
BufferPoolStats {
total_buffers: total,
buffers_in_use: in_use,
total_bytes,
}
}
}
impl Default for BufferPool {
fn default() -> Self {
Self::new()
}
}
/// Statistics about buffer pool usage
#[derive(Clone, Debug)]
pub struct BufferPoolStats {
pub total_buffers: usize,
pub buffers_in_use: usize,
pub total_bytes: u64,
}
impl BufferPoolStats {
pub fn total_megabytes(&self) -> f64 {
self.total_bytes as f64 / (1024.0 * 1024.0)
}
}
#[cfg(test)]
mod tests {
use super::*;
// Note: These tests require a wgpu device, so they're marked as ignored
// Run with: cargo test -- --ignored
#[test]
#[ignore]
fn test_buffer_pool_basics() {
// Would need wgpu device setup for actual testing
}
}
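// Editor's sketch (not part of this commit): a typical per-frame acquire/release
// cycle, assuming a `wgpu::Device` is already available. Names here are
// illustrative only.
#[allow(dead_code)]
fn example_frame(device: &wgpu::Device, pool: &mut BufferPool) {
// Acquire (or reuse) an HDR render target matching the spec
let handle = pool.acquire(device, BufferSpec::hdr(1920, 1080));
// ... render into pool.get_view(handle) here ...
// Release so the next acquire with the same spec can reuse the texture
pool.release(handle);
// Advance the frame counter and evict buffers idle past max_unused_frames
pool.next_frame();
pool.clear_unused();
}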

View File

@@ -0,0 +1,549 @@
// Compositor for blending layers with proper opacity
//
// Handles alpha-over compositing with per-layer opacity and blend modes.
// All processing is done in HDR (RGBA16Float) linear color space.
use super::buffer_pool::{BufferHandle, BufferPool};
/// Blend mode for layer compositing
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum BlendMode {
/// Standard alpha-over compositing (Porter-Duff "over")
#[default]
Normal,
/// Additive blending (src + dst)
Add,
/// Multiply (src * dst)
Multiply,
/// Screen (1 - (1-src) * (1-dst))
Screen,
/// Overlay (multiply dark, screen light)
Overlay,
/// Soft light
SoftLight,
/// Hard light
HardLight,
/// Color dodge
ColorDodge,
/// Color burn
ColorBurn,
/// Darken (min)
Darken,
/// Lighten (max)
Lighten,
/// Difference (abs(src - dst))
Difference,
/// Exclusion
Exclusion,
}
impl BlendMode {
/// Get the blend mode index for shader uniform
pub fn to_index(&self) -> u32 {
match self {
BlendMode::Normal => 0,
BlendMode::Add => 1,
BlendMode::Multiply => 2,
BlendMode::Screen => 3,
BlendMode::Overlay => 4,
BlendMode::SoftLight => 5,
BlendMode::HardLight => 6,
BlendMode::ColorDodge => 7,
BlendMode::ColorBurn => 8,
BlendMode::Darken => 9,
BlendMode::Lighten => 10,
BlendMode::Difference => 11,
BlendMode::Exclusion => 12,
}
}
/// Get all available blend modes
pub fn all() -> &'static [BlendMode] {
&[
BlendMode::Normal,
BlendMode::Add,
BlendMode::Multiply,
BlendMode::Screen,
BlendMode::Overlay,
BlendMode::SoftLight,
BlendMode::HardLight,
BlendMode::ColorDodge,
BlendMode::ColorBurn,
BlendMode::Darken,
BlendMode::Lighten,
BlendMode::Difference,
BlendMode::Exclusion,
]
}
/// Get display name for UI
pub fn display_name(&self) -> &'static str {
match self {
BlendMode::Normal => "Normal",
BlendMode::Add => "Add",
BlendMode::Multiply => "Multiply",
BlendMode::Screen => "Screen",
BlendMode::Overlay => "Overlay",
BlendMode::SoftLight => "Soft Light",
BlendMode::HardLight => "Hard Light",
BlendMode::ColorDodge => "Color Dodge",
BlendMode::ColorBurn => "Color Burn",
BlendMode::Darken => "Darken",
BlendMode::Lighten => "Lighten",
BlendMode::Difference => "Difference",
BlendMode::Exclusion => "Exclusion",
}
}
}
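// Editor's sketch (not part of this commit): populating a UI dropdown from
// the mode list, e.g. for an egui ComboBox.
#[allow(dead_code)]
fn blend_mode_labels() -> Vec<&'static str> {
BlendMode::all().iter().map(|m| m.display_name()).collect()
}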
/// A layer to be composited
#[derive(Clone, Debug)]
pub struct CompositorLayer {
/// Handle to the layer's rendered buffer
pub buffer: BufferHandle,
/// Layer opacity (0.0 to 1.0)
pub opacity: f32,
/// Blend mode for this layer
pub blend_mode: BlendMode,
}
impl CompositorLayer {
pub fn new(buffer: BufferHandle, opacity: f32, blend_mode: BlendMode) -> Self {
Self {
buffer,
opacity: opacity.clamp(0.0, 1.0),
blend_mode,
}
}
pub fn normal(buffer: BufferHandle, opacity: f32) -> Self {
Self::new(buffer, opacity, BlendMode::Normal)
}
}
/// Uniform data for the composite shader
#[repr(C)]
#[derive(Clone, Copy, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CompositeUniforms {
/// Layer opacity (0.0 to 1.0)
pub opacity: f32,
/// Blend mode index
pub blend_mode: u32,
/// Padding for alignment
pub _padding: [u32; 2],
}
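// Editor's note (added): this layout must stay in sync with the WGSL `Uniforms`
// struct below (f32 opacity + u32 blend_mode + vec2<u32> padding = 16 bytes).
const _: () = assert!(std::mem::size_of::<CompositeUniforms>() == 16);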
/// Compositor for blending layers
///
/// Handles the final compositing pass that combines all rendered layers
/// with proper opacity and blend modes.
pub struct Compositor {
/// Render pipeline for compositing
pipeline: wgpu::RenderPipeline,
/// Bind group layout for layer textures
bind_group_layout: wgpu::BindGroupLayout,
/// Sampler for texture sampling
sampler: wgpu::Sampler,
}
impl Compositor {
/// Create a new compositor
pub fn new(device: &wgpu::Device, output_format: wgpu::TextureFormat) -> Self {
// Create bind group layout
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("compositor_bind_group_layout"),
entries: &[
// Source layer texture
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::D2,
multisampled: false,
},
count: None,
},
// Sampler
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
// Uniforms
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
});
// Create pipeline layout
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("compositor_pipeline_layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
// Create shader module
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("compositor_shader"),
source: wgpu::ShaderSource::Wgsl(COMPOSITE_SHADER.into()),
});
// Create render pipeline
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("compositor_pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: Some("vs_main"),
buffers: &[],
compilation_options: wgpu::PipelineCompilationOptions::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("fs_main"),
targets: &[Some(wgpu::ColorTargetState {
format: output_format,
// Use premultiplied alpha blending for compositing
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: wgpu::PipelineCompilationOptions::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleStrip,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: None,
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
// Create sampler
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("compositor_sampler"),
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Self {
pipeline,
bind_group_layout,
sampler,
}
}
/// Create a bind group for compositing a layer
pub fn create_layer_bind_group(
&self,
device: &wgpu::Device,
layer_view: &wgpu::TextureView,
uniforms_buffer: &wgpu::Buffer,
) -> wgpu::BindGroup {
device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("compositor_layer_bind_group"),
layout: &self.bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(layer_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&self.sampler),
},
wgpu::BindGroupEntry {
binding: 2,
resource: uniforms_buffer.as_entire_binding(),
},
],
})
}
/// Composite layers onto the output texture
///
/// Layers are composited in order (first layer is bottom, last is top).
/// The output texture should be cleared before calling this method, or pass
/// `clear_color` to clear it with the first layer.
///
/// Note: all layers in one call share a single uniform buffer updated via
/// `queue.write_buffer`, and those writes take effect at the next submit; to
/// give each layer its own opacity/blend uniforms, submit one layer per call
/// (as the stage pane does).
pub fn composite(
&self,
device: &wgpu::Device,
queue: &wgpu::Queue,
encoder: &mut wgpu::CommandEncoder,
layers: &[CompositorLayer],
buffer_pool: &BufferPool,
output: &wgpu::TextureView,
clear_color: Option<[f32; 4]>,
) {
// Create uniforms buffer (reused for all layers)
let uniforms_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("compositor_uniforms"),
size: std::mem::size_of::<CompositeUniforms>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
for (i, layer) in layers.iter().enumerate() {
let Some(layer_view) = buffer_pool.get_view(layer.buffer) else {
continue;
};
// Update uniforms
let uniforms = CompositeUniforms {
opacity: layer.opacity,
blend_mode: layer.blend_mode.to_index(),
_padding: [0, 0],
};
queue.write_buffer(&uniforms_buffer, 0, bytemuck::bytes_of(&uniforms));
// Create bind group for this layer
let bind_group = self.create_layer_bind_group(device, layer_view, &uniforms_buffer);
// Determine load operation (clear on first layer if requested)
let load_op = if i == 0 {
if let Some(color) = clear_color {
wgpu::LoadOp::Clear(wgpu::Color {
r: color[0] as f64,
g: color[1] as f64,
b: color[2] as f64,
a: color[3] as f64,
})
} else {
wgpu::LoadOp::Load
}
} else {
wgpu::LoadOp::Load
};
// Render pass for this layer
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some(&format!("composite_layer_{}", i)),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: output,
resolve_target: None,
ops: wgpu::Operations {
load: load_op,
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&self.pipeline);
render_pass.set_bind_group(0, &bind_group, &[]);
render_pass.draw(0..4, 0..1);
}
}
/// Get the bind group layout (for external use)
pub fn bind_group_layout(&self) -> &wgpu::BindGroupLayout {
&self.bind_group_layout
}
}
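// Editor's sketch (not part of this commit): driving the compositor one layer
// per submit, mirroring how the stage pane uses it. Because `composite` shares
// one uniform buffer across layers via queue.write_buffer, batching several
// layers into a single encoder would make every pass read the last layer's
// uniforms.
#[allow(dead_code)]
fn composite_single_layer(
compositor: &Compositor,
device: &wgpu::Device,
queue: &wgpu::Queue,
layer: CompositorLayer,
pool: &BufferPool,
target: &wgpu::TextureView,
) {
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
// No clear color: blend onto whatever is already in `target`
compositor.composite(device, queue, &mut encoder, &[layer], pool, target, None);
queue.submit(Some(encoder.finish()));
}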
/// WGSL shader for layer compositing
const COMPOSITE_SHADER: &str = r#"
// Compositor shader - blends a source layer onto the destination with opacity and blend modes
struct Uniforms {
opacity: f32,
blend_mode: u32,
_padding: vec2<u32>,
}
@group(0) @binding(0) var source_tex: texture_2d<f32>;
@group(0) @binding(1) var source_sampler: sampler;
@group(0) @binding(2) var<uniform> uniforms: Uniforms;
struct VertexOutput {
@builtin(position) position: vec4<f32>,
@location(0) uv: vec2<f32>,
}
// Fullscreen triangle strip
@vertex
fn vs_main(@builtin(vertex_index) vertex_index: u32) -> VertexOutput {
var out: VertexOutput;
let x = f32((vertex_index & 1u) << 1u);
let y = f32(vertex_index & 2u);
out.position = vec4<f32>(x * 2.0 - 1.0, 1.0 - y * 2.0, 0.0, 1.0);
out.uv = vec2<f32>(x, y);
return out;
}
// sRGB to linear color space conversion
// Vello outputs sRGB-encoded colors; we need linear for correct HDR blending
fn srgb_to_linear_channel(c: f32) -> f32 {
return select(
pow((c + 0.055) / 1.055, 2.4),
c / 12.92,
c <= 0.04045
);
}
fn srgb_to_linear(color: vec3<f32>) -> vec3<f32> {
return vec3<f32>(
srgb_to_linear_channel(color.r),
srgb_to_linear_channel(color.g),
srgb_to_linear_channel(color.b)
);
}
// Blend mode implementations
fn blend_normal(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
return src;
}
fn blend_add(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
return src + dst;
}
fn blend_multiply(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
return src * dst;
}
fn blend_screen(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
return 1.0 - (1.0 - src) * (1.0 - dst);
}
fn blend_overlay_channel(s: f32, d: f32) -> f32 {
return select(
1.0 - 2.0 * (1.0 - s) * (1.0 - d),
2.0 * s * d,
d < 0.5
);
}
fn blend_overlay(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
return vec3<f32>(
blend_overlay_channel(src.r, dst.r),
blend_overlay_channel(src.g, dst.g),
blend_overlay_channel(src.b, dst.b)
);
}
fn blend_soft_light_channel(s: f32, d: f32) -> f32 {
// W3C soft-light; note select() returns its second argument when the condition is true
return select(
d + (2.0 * s - 1.0) * (select(
((16.0 * d - 12.0) * d + 4.0) * d,
sqrt(d),
d > 0.25
) - d),
d - (1.0 - 2.0 * s) * d * (1.0 - d),
s <= 0.5
);
}
fn blend_soft_light(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
return vec3<f32>(
blend_soft_light_channel(src.r, dst.r),
blend_soft_light_channel(src.g, dst.g),
blend_soft_light_channel(src.b, dst.b)
);
}
fn blend_hard_light(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
// Hard light is overlay with src and dst swapped
return blend_overlay(dst, src);
}
fn blend_color_dodge(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
// Component-wise: a channel with src >= 1 dodges fully to 1
return select(
min(vec3<f32>(1.0), dst / (1.0 - src)),
vec3<f32>(1.0),
src >= vec3<f32>(1.0)
);
}
fn blend_color_burn(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
// Component-wise: a channel with src <= 0 burns fully to 0
return select(
1.0 - min(vec3<f32>(1.0), (1.0 - dst) / src),
vec3<f32>(0.0),
src <= vec3<f32>(0.0)
);
}
fn blend_darken(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
return min(src, dst);
}
fn blend_lighten(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
return max(src, dst);
}
fn blend_difference(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
return abs(src - dst);
}
fn blend_exclusion(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
return src + dst - 2.0 * src * dst;
}
fn apply_blend(src: vec3<f32>, dst: vec3<f32>, mode: u32) -> vec3<f32> {
switch (mode) {
case 0u: { return blend_normal(src, dst); } // Normal
case 1u: { return blend_add(src, dst); } // Add
case 2u: { return blend_multiply(src, dst); } // Multiply
case 3u: { return blend_screen(src, dst); } // Screen
case 4u: { return blend_overlay(src, dst); } // Overlay
case 5u: { return blend_soft_light(src, dst); } // Soft Light
case 6u: { return blend_hard_light(src, dst); } // Hard Light
case 7u: { return blend_color_dodge(src, dst); } // Color Dodge
case 8u: { return blend_color_burn(src, dst); } // Color Burn
case 9u: { return blend_darken(src, dst); } // Darken
case 10u: { return blend_lighten(src, dst); } // Lighten
case 11u: { return blend_difference(src, dst); } // Difference
case 12u: { return blend_exclusion(src, dst); } // Exclusion
default: { return blend_normal(src, dst); }
}
}
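// Editor's note (added): fs_main below does not call apply_blend yet; the
// destination is not readable from this fragment shader, so the pipeline's
// fixed-function blend state (One / OneMinusSrcAlpha) implements Normal only.
// Non-Normal modes would need destination access, e.g. ping-pong textures.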
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
let src = textureSample(source_tex, source_sampler, in.uv);
// Convert Vello's sRGB output to linear for correct HDR blending
let linear_rgb = srgb_to_linear(src.rgb);
// Apply opacity
let src_alpha = src.a * uniforms.opacity;
// Output premultiplied alpha in linear color space
return vec4<f32>(linear_rgb * src_alpha, src_alpha);
}
"#;

View File

@@ -0,0 +1,19 @@
// GPU rendering infrastructure for HDR compositing pipeline
//
// This module provides:
// - Buffer pooling for efficient render target management
// - Compositor for layer blending with proper opacity
// - Effect pipeline for GPU shader effects (planned; not yet in this module)
pub mod buffer_pool;
pub mod compositor;
// Re-export commonly used types
pub use buffer_pool::{BufferHandle, BufferPool, BufferSpec, BufferFormat};
pub use compositor::{Compositor, CompositorLayer, BlendMode};
/// Standard HDR internal texture format (16-bit float per channel)
pub const HDR_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba16Float;
/// Display output format (8-bit sRGB)
pub const DISPLAY_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8Unorm;

View File

@@ -1,6 +1,7 @@
// Lightningbeam Core Library
// Shared data structures and types
pub mod gpu;
pub mod layout;
pub mod pane;
pub mod tool;

View File

@@ -1,10 +1,17 @@
//! Rendering system for Lightningbeam documents
//!
//! Renders documents to Vello scenes for GPU-accelerated display.
//!
//! This module supports two rendering modes:
//! 1. **Legacy mode**: All layers rendered to a single Scene (simple, fast)
//! 2. **Compositing mode**: Each layer rendered to its own Scene for HDR compositing
//!
//! The compositing mode enables proper per-layer opacity, blend modes, and effects.
use crate::animation::TransformProperty;
use crate::clip::ImageAsset;
use crate::document::Document;
use crate::gpu::BlendMode;
use crate::layer::{AnyLayer, LayerTrait, VectorLayer};
use kurbo::{Affine, Shape};
use std::collections::HashMap;
@@ -75,6 +82,225 @@ fn decode_image_asset(asset: &ImageAsset) -> Option<Image> {
))
}
// ============================================================================
// Per-Layer Rendering for HDR Compositing Pipeline
// ============================================================================
/// Metadata for a rendered layer, used for compositing
pub struct RenderedLayer {
/// The layer's unique identifier
pub layer_id: Uuid,
/// The Vello scene containing the layer's rendered content
pub scene: Scene,
/// Layer opacity (0.0 to 1.0)
pub opacity: f32,
/// Blend mode for compositing
pub blend_mode: BlendMode,
/// Whether this layer has any visible content
pub has_content: bool,
}
impl RenderedLayer {
/// Create a new rendered layer with default settings
pub fn new(layer_id: Uuid) -> Self {
Self {
layer_id,
scene: Scene::new(),
opacity: 1.0,
blend_mode: BlendMode::Normal,
has_content: false,
}
}
/// Create with specific opacity and blend mode
pub fn with_settings(layer_id: Uuid, opacity: f32, blend_mode: BlendMode) -> Self {
Self {
layer_id,
scene: Scene::new(),
opacity,
blend_mode,
has_content: false,
}
}
}
/// Result of rendering a document for compositing
pub struct CompositeRenderResult {
/// Background scene (rendered separately for potential optimization)
pub background: Scene,
/// Rendered layers in bottom-to-top order
pub layers: Vec<RenderedLayer>,
/// Document dimensions
pub width: f64,
pub height: f64,
}
/// Render a document for the HDR compositing pipeline
///
/// Unlike `render_document_with_transform`, this function renders each visible
/// layer to its own Scene, enabling proper per-layer opacity, blend modes,
/// and effects in the GPU compositor.
///
/// Layers are returned in bottom-to-top order for compositing.
pub fn render_document_for_compositing(
document: &Document,
base_transform: Affine,
image_cache: &mut ImageCache,
video_manager: &std::sync::Arc<std::sync::Mutex<crate::video::VideoManager>>,
) -> CompositeRenderResult {
let time = document.current_time;
// Render background to its own scene
let mut background = Scene::new();
render_background(document, &mut background, base_transform);
// Check if any layers are soloed
let any_soloed = document.visible_layers().any(|layer| layer.soloed());
// Collect layers to render
let layers_to_render: Vec<_> = document
.visible_layers()
.filter(|layer| {
if any_soloed {
layer.soloed()
} else {
true
}
})
.collect();
// Render each layer to its own scene
let mut rendered_layers = Vec::with_capacity(layers_to_render.len());
for layer in layers_to_render {
let rendered = render_layer_isolated(
document,
time,
layer,
base_transform,
image_cache,
video_manager,
);
rendered_layers.push(rendered);
}
CompositeRenderResult {
background,
layers: rendered_layers,
width: document.width,
height: document.height,
}
}
/// Render a single layer to its own isolated Scene
///
/// The layer is rendered with full opacity in its scene; the actual opacity
/// will be applied during compositing. This enables proper alpha blending
/// for nested clips and complex layer hierarchies.
pub fn render_layer_isolated(
document: &Document,
time: f64,
layer: &AnyLayer,
base_transform: Affine,
image_cache: &mut ImageCache,
video_manager: &std::sync::Arc<std::sync::Mutex<crate::video::VideoManager>>,
) -> RenderedLayer {
let layer_id = layer.id();
let opacity = layer.opacity() as f32;
// TODO: When we add blend mode support to layers, read it here
let blend_mode = BlendMode::Normal;
let mut rendered = RenderedLayer::with_settings(layer_id, opacity, blend_mode);
// Render layer content with full opacity (1.0) - opacity applied during compositing
match layer {
AnyLayer::Vector(vector_layer) => {
render_vector_layer_to_scene(
document,
time,
vector_layer,
&mut rendered.scene,
base_transform,
1.0, // Full opacity - layer opacity handled in compositing
image_cache,
video_manager,
);
rendered.has_content = !vector_layer.shape_instances.is_empty()
|| !vector_layer.clip_instances.is_empty();
}
AnyLayer::Audio(_) => {
// Audio layers don't render visually
rendered.has_content = false;
}
AnyLayer::Video(video_layer) => {
let mut video_mgr = video_manager.lock().unwrap();
render_video_layer_to_scene(
document,
time,
video_layer,
&mut rendered.scene,
base_transform,
1.0, // Full opacity - layer opacity handled in compositing
&mut video_mgr,
);
rendered.has_content = !video_layer.clip_instances.is_empty();
}
}
rendered
}
/// Render a vector layer to an isolated scene (for compositing pipeline)
fn render_vector_layer_to_scene(
document: &Document,
time: f64,
layer: &VectorLayer,
scene: &mut Scene,
base_transform: Affine,
parent_opacity: f64,
image_cache: &mut ImageCache,
video_manager: &std::sync::Arc<std::sync::Mutex<crate::video::VideoManager>>,
) {
// Render using the existing function but to this isolated scene
render_vector_layer(
document,
time,
layer,
scene,
base_transform,
parent_opacity,
image_cache,
video_manager,
);
}
/// Render a video layer to an isolated scene (for compositing pipeline)
fn render_video_layer_to_scene(
document: &Document,
time: f64,
layer: &crate::layer::VideoLayer,
scene: &mut Scene,
base_transform: Affine,
parent_opacity: f64,
video_manager: &mut crate::video::VideoManager,
) {
// Render using the existing function but to this isolated scene
render_video_layer(
document,
time,
layer,
scene,
base_transform,
parent_opacity,
video_manager,
);
}
// ============================================================================
// Legacy Single-Scene Rendering (kept for backwards compatibility)
// ============================================================================
/// Render a document to a Vello scene
pub fn render_document(
document: &Document,

View File

@@ -0,0 +1,56 @@
// Linear to sRGB color space conversion (fragment shader)
//
// Blits from HDR composite texture to display output.
// Input: RGBA16Float HDR texture in LINEAR color space
// Output: RGBA8Unorm sRGB for display
//
// The HDR texture contains linear color values (compositor converts
// Vello's sRGB output to linear). This shader converts back to sRGB
// for correct display on standard monitors.
@group(0) @binding(0) var input_tex: texture_2d<f32>;
@group(0) @binding(1) var input_sampler: sampler;
struct VertexOutput {
@builtin(position) position: vec4<f32>,
@location(0) uv: vec2<f32>,
}
// Fullscreen triangle vertex shader (3 vertices for a full-screen triangle)
@vertex
fn vs_main(@builtin(vertex_index) vertex_index: u32) -> VertexOutput {
var out: VertexOutput;
let x = f32((vertex_index & 1u) << 1u);
let y = f32(vertex_index & 2u);
out.position = vec4<f32>(x * 2.0 - 1.0, 1.0 - y * 2.0, 0.0, 1.0);
out.uv = vec2<f32>(x, y);
return out;
}
// Linear to sRGB conversion for a single channel
// Formula: c <= 0.0031308 ? c*12.92 : 1.055*pow(c, 1/2.4) - 0.055
fn linear_to_srgb_channel(c: f32) -> f32 {
let clamped = clamp(c, 0.0, 1.0);
return select(
1.055 * pow(clamped, 1.0 / 2.4) - 0.055,
clamped * 12.92,
clamped <= 0.0031308
);
}
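// Editor's note (added): sanity values for the formula above:
// linear 0.5 -> 1.055 * pow(0.5, 1/2.4) - 0.055 ≈ 0.735
// linear 0.002 -> 0.002 * 12.92 ≈ 0.0258 (below the 0.0031308 threshold)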
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
// Sample linear HDR texture
let linear = textureSample(input_tex, input_sampler, in.uv);
// Convert from linear to sRGB for display (alpha stays linear)
return vec4<f32>(
linear_to_srgb_channel(linear.r),
linear_to_srgb_channel(linear.g),
linear_to_srgb_channel(linear.b),
linear.a
);
}

View File

@@ -0,0 +1,42 @@
// sRGB to Linear color space conversion (compute shader)
//
// Converts an sRGB texture to linear color space for HDR processing.
// Input: RGBA8 sRGB texture
// Output: RGBA16Float linear texture
@group(0) @binding(0) var input_tex: texture_2d<f32>;
@group(0) @binding(1) var output_tex: texture_storage_2d<rgba16float, write>;
// sRGB to linear conversion for a single channel
// Formula: c <= 0.04045 ? c/12.92 : pow((c+0.055)/1.055, 2.4)
fn srgb_to_linear(c: f32) -> f32 {
return select(
pow((c + 0.055) / 1.055, 2.4),
c / 12.92,
c <= 0.04045
);
}
@compute @workgroup_size(8, 8)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
let dims = textureDimensions(input_tex);
// Bounds check
if (gid.x >= dims.x || gid.y >= dims.y) {
return;
}
// Load sRGB pixel
let srgb = textureLoad(input_tex, gid.xy, 0);
// Convert RGB channels to linear (alpha stays linear)
let linear = vec4<f32>(
srgb_to_linear(srgb.r),
srgb_to_linear(srgb.g),
srgb_to_linear(srgb.b),
srgb.a
);
// Store linear result
textureStore(output_tex, gid.xy, linear);
}
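// Editor's note (added): with @workgroup_size(8, 8), the host should dispatch
// ceil(width / 8) x ceil(height / 8) workgroups, e.g. in wgpu:
// pass.dispatch_workgroups((width + 7) / 8, (height + 7) / 8, 1);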

View File

@@ -1,32 +1,50 @@
/// Stage pane - main animation canvas with Vello rendering
///
/// Renders composited layers using Vello GPU renderer via egui callbacks.
/// Supports HDR compositing pipeline with per-layer buffers and effects.
use eframe::egui;
use lightningbeam_core::action::Action;
use lightningbeam_core::clip::ClipInstance;
use lightningbeam_core::gpu::{BufferPool, Compositor};
use lightningbeam_core::layer::{AnyLayer, AudioLayer, AudioLayerType, VideoLayer, VectorLayer};
use super::{DragClipType, NodePath, PaneRenderer, SharedPaneState};
use std::sync::{Arc, Mutex, OnceLock};
use vello::kurbo::Shape;
/// Enable HDR compositing pipeline (per-layer rendering with proper opacity)
/// Set to true to use the new pipeline, false for legacy single-scene rendering
const USE_HDR_COMPOSITING: bool = true; // Enabled for testing
/// Shared Vello resources (created once, reused by all Stage panes)
struct SharedVelloResources {
renderer: Arc<Mutex<vello::Renderer>>,
blit_pipeline: wgpu::RenderPipeline,
blit_bind_group_layout: wgpu::BindGroupLayout,
/// HDR to sRGB blit pipeline (linear→sRGB conversion for display)
hdr_blit_pipeline: wgpu::RenderPipeline,
sampler: wgpu::Sampler,
/// Shared image cache for avoiding re-decoding images every frame
image_cache: Mutex<lightningbeam_core::renderer::ImageCache>,
/// Video manager for video decoding and frame caching
video_manager: std::sync::Arc<std::sync::Mutex<lightningbeam_core::video::VideoManager>>,
/// Buffer pool for HDR compositing pipeline
buffer_pool: Mutex<BufferPool>,
/// Compositor for layer blending
compositor: Compositor,
}
/// Per-instance Vello resources (created for each Stage pane)
struct InstanceVelloResources {
/// Output texture (Rgba8Unorm for legacy, used for final blit)
texture: Option<wgpu::Texture>,
texture_view: Option<wgpu::TextureView>,
blit_bind_group: Option<wgpu::BindGroup>,
/// HDR composite texture (Rgba16Float for internal compositing)
hdr_texture: Option<wgpu::Texture>,
hdr_texture_view: Option<wgpu::TextureView>,
/// Bind group for HDR to sRGB conversion
hdr_blit_bind_group: Option<wgpu::BindGroup>,
}
/// Container for all Vello instances, stored in egui's CallbackResources
@@ -118,6 +136,47 @@ impl SharedVelloResources {
cache: None,
});
// Create HDR blit pipeline (linear→sRGB conversion for display output)
// Uses linear_to_srgb.wgsl which reads from Rgba16Float HDR texture
let hdr_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("hdr_blit_shader"),
source: wgpu::ShaderSource::Wgsl(include_str!("shaders/linear_to_srgb.wgsl").into()),
});
let hdr_blit_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("hdr_blit_pipeline"),
layout: Some(&pipeline_layout), // Reuse same layout (texture + sampler)
vertex: wgpu::VertexState {
module: &hdr_shader,
entry_point: Some("vs_main"),
buffers: &[],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &hdr_shader,
entry_point: Some("fs_main"),
targets: &[Some(wgpu::ColorTargetState {
format: wgpu::TextureFormat::Rgba8Unorm, // Output to display-ready texture
blend: None, // No blending - direct replacement
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleStrip,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: None,
unclipped_depth: false,
polygon_mode: wgpu::PolygonMode::Fill,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
// Create sampler
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: Some("vello_blit_sampler"),
@@ -130,15 +189,25 @@ impl SharedVelloResources {
..Default::default()
});
// Initialize buffer pool for HDR compositing
let buffer_pool = BufferPool::new();
// Initialize compositor for layer blending
// Use HDR format for internal compositing
let compositor = Compositor::new(device, lightningbeam_core::gpu::HDR_FORMAT);
println!("✅ Vello shared resources initialized (renderer, shaders, and HDR compositor)");
Ok(Self {
renderer: Arc::new(Mutex::new(renderer)),
blit_pipeline,
blit_bind_group_layout,
hdr_blit_pipeline,
sampler,
image_cache: Mutex::new(lightningbeam_core::renderer::ImageCache::new()),
video_manager,
buffer_pool: Mutex::new(buffer_pool),
compositor,
})
}
}
@@ -149,6 +218,9 @@ impl InstanceVelloResources {
texture: None,
texture_view: None,
blit_bind_group: None,
hdr_texture: None,
hdr_texture_view: None,
hdr_blit_bind_group: None,
}
}
@@ -172,7 +244,11 @@ impl InstanceVelloResources {
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8Unorm,
// RENDER_ATTACHMENT needed for HDR blit, STORAGE_BINDING for Vello
usage: wgpu::TextureUsages::STORAGE_BINDING
| wgpu::TextureUsages::TEXTURE_BINDING
| wgpu::TextureUsages::COPY_SRC
| wgpu::TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
});
@@ -198,6 +274,57 @@ impl InstanceVelloResources {
self.texture_view = Some(texture_view);
self.blit_bind_group = Some(bind_group);
}
/// Ensure HDR texture exists for compositing pipeline
fn ensure_hdr_texture(&mut self, device: &wgpu::Device, shared: &SharedVelloResources, width: u32, height: u32) {
// Clamp to a conservative GPU texture-size limit; querying
// device.limits().max_texture_dimension_2d would be more robust
let max_texture_size = 8192;
let width = width.min(max_texture_size);
let height = height.min(max_texture_size);
// Only recreate if size changed
if let Some(tex) = &self.hdr_texture {
if tex.width() == width && tex.height() == height {
return;
}
}
// Create HDR texture (Rgba16Float for internal compositing)
let hdr_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("hdr_composite_output"),
size: wgpu::Extent3d { width, height, depth_or_array_layers: 1 },
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: lightningbeam_core::gpu::HDR_FORMAT,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT
| wgpu::TextureUsages::TEXTURE_BINDING
| wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let hdr_texture_view = hdr_texture.create_view(&wgpu::TextureViewDescriptor::default());
// Create bind group for HDR to sRGB conversion (uses same layout as blit)
let hdr_blit_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("hdr_blit_bind_group"),
layout: &shared.blit_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&hdr_texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&shared.sampler),
},
],
});
self.hdr_texture = Some(hdr_texture);
self.hdr_texture_view = Some(hdr_texture_view);
self.hdr_blit_bind_group = Some(hdr_blit_bind_group);
}
}
/// Callback for Vello rendering within egui
@@ -287,24 +414,144 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
instance_resources.ensure_texture(device, &shared, width, height);
// Build camera transform: translate for pan, scale for zoom
use vello::kurbo::Affine;
let camera_transform = Affine::translate((self.pan_offset.x as f64, self.pan_offset.y as f64))
* Affine::scale(self.zoom as f64);
// Choose rendering path based on HDR compositing flag
let mut scene = if USE_HDR_COMPOSITING {
// HDR Compositing Pipeline: render each layer separately for proper opacity
// Uses incremental compositing: render layer → composite onto accumulator → release buffer
// This means we only need 1 layer buffer at a time (plus the HDR accumulator)
instance_resources.ensure_hdr_texture(device, &shared, width, height);
let mut image_cache = shared.image_cache.lock().unwrap();
let composite_result = lightningbeam_core::renderer::render_document_for_compositing(
&self.document,
camera_transform,
&mut image_cache,
&shared.video_manager,
);
drop(image_cache);
// Get buffer pool for layer rendering
let mut buffer_pool = shared.buffer_pool.lock().unwrap();
// Buffer spec for layer rendering (Vello outputs Rgba8)
let layer_spec = lightningbeam_core::gpu::BufferSpec::new(
width,
height,
lightningbeam_core::gpu::BufferFormat::Rgba8Srgb,
);
// Render parameters for Vello (transparent background for layers)
let layer_render_params = vello::RenderParams {
base_color: vello::peniko::Color::TRANSPARENT,
width,
height,
antialiasing_method: vello::AaConfig::Msaa16,
};
// First, render background and composite it
// The background scene contains only a rectangle at document bounds,
// so we use TRANSPARENT base_color to not fill the whole viewport
let bg_handle = buffer_pool.acquire(device, layer_spec);
if let (Some(bg_view), Some(hdr_view)) = (buffer_pool.get_view(bg_handle), &instance_resources.hdr_texture_view) {
// Render background scene with transparent base (scene has the bg rect)
let bg_render_params = vello::RenderParams {
base_color: vello::peniko::Color::TRANSPARENT,
width,
height,
antialiasing_method: vello::AaConfig::Msaa16,
};
if let Ok(mut renderer) = shared.renderer.lock() {
renderer.render_to_texture(device, queue, &composite_result.background, bg_view, &bg_render_params).ok();
}
// Composite background onto HDR texture (first layer, clears to dark gray for stage area)
let bg_compositor_layer = lightningbeam_core::gpu::CompositorLayer::normal(bg_handle, 1.0);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("bg_composite_encoder"),
});
// Clear to dark gray (stage background outside document bounds)
// Note: these are sRGB byte values written into the linear HDR target;
// matching the legacy gray exactly would need an sRGB→linear conversion first
let stage_bg = [45.0 / 255.0, 45.0 / 255.0, 48.0 / 255.0, 1.0];
shared.compositor.composite(
device,
queue,
&mut encoder,
&[bg_compositor_layer],
&buffer_pool,
hdr_view,
Some(stage_bg),
);
queue.submit(Some(encoder.finish()));
}
buffer_pool.release(bg_handle);
// Now render and composite each layer incrementally
for rendered_layer in &composite_result.layers {
if !rendered_layer.has_content {
continue;
}
// Acquire a buffer for this layer
let layer_handle = buffer_pool.acquire(device, layer_spec);
if let (Some(layer_view), Some(hdr_view)) = (buffer_pool.get_view(layer_handle), &instance_resources.hdr_texture_view) {
// Render layer scene to buffer
if let Ok(mut renderer) = shared.renderer.lock() {
renderer.render_to_texture(device, queue, &rendered_layer.scene, layer_view, &layer_render_params).ok();
}
// Composite this layer onto the HDR accumulator with its opacity
let compositor_layer = lightningbeam_core::gpu::CompositorLayer::new(
layer_handle,
rendered_layer.opacity,
rendered_layer.blend_mode,
);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("layer_composite_encoder"),
});
shared.compositor.composite(
device,
queue,
&mut encoder,
&[compositor_layer],
&buffer_pool,
hdr_view,
None, // Don't clear - blend onto existing content
);
queue.submit(Some(encoder.finish()));
}
// Release buffer immediately - it can be reused for next layer
buffer_pool.release(layer_handle);
}
// Advance frame counter for buffer cleanup
buffer_pool.next_frame();
drop(buffer_pool);
// For drag preview and other overlays, we still need a scene
// Create an empty scene - the composited result is already in hdr_texture
vello::Scene::new()
} else {
// Legacy single-scene rendering
let mut scene = vello::Scene::new();
let mut image_cache = shared.image_cache.lock().unwrap();
lightningbeam_core::renderer::render_document_with_transform(
&self.document,
&mut scene,
camera_transform,
&mut image_cache,
&shared.video_manager,
);
drop(image_cache);
scene
};
// Render drag preview objects with transparency
if let (Some(delta), Some(active_layer_id)) = (self.drag_delta, self.active_layer_id) {
@@ -1292,17 +1539,97 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
// Render scene to texture using shared renderer
if let Some(texture_view) = &instance_resources.texture_view {
if USE_HDR_COMPOSITING {
// HDR mode: First render overlays to HDR texture, then blit to output
// Step 1: Render overlay scene (selection handles, drag previews, etc.) to HDR texture
// The overlay scene was built above with all the UI elements
if let Some(hdr_view) = &instance_resources.hdr_texture_view {
let mut buffer_pool = shared.buffer_pool.lock().unwrap();
let overlay_spec = lightningbeam_core::gpu::BufferSpec::new(
width,
height,
lightningbeam_core::gpu::BufferFormat::Rgba8Srgb,
);
let overlay_handle = buffer_pool.acquire(device, overlay_spec);
if let Some(overlay_view) = buffer_pool.get_view(overlay_handle) {
// Render overlay scene to temp buffer
let overlay_params = vello::RenderParams {
base_color: vello::peniko::Color::TRANSPARENT,
width,
height,
antialiasing_method: vello::AaConfig::Msaa16,
};
if let Ok(mut renderer) = shared.renderer.lock() {
renderer.render_to_texture(device, queue, &scene, overlay_view, &overlay_params).ok();
}
// Composite overlay onto HDR texture (sRGB→linear conversion happens in compositor)
let overlay_layer = lightningbeam_core::gpu::CompositorLayer::normal(overlay_handle, 1.0);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("overlay_composite_encoder"),
});
shared.compositor.composite(
device,
queue,
&mut encoder,
&[overlay_layer],
&buffer_pool,
hdr_view,
None, // Don't clear - blend onto existing content
);
queue.submit(Some(encoder.finish()));
}
buffer_pool.release(overlay_handle);
drop(buffer_pool);
}
// Step 2: Blit HDR texture to output with linear→sRGB conversion
if let Some(hdr_bind_group) = &instance_resources.hdr_blit_bind_group {
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("hdr_to_srgb_encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("hdr_to_srgb_pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: texture_view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
timestamp_writes: None,
occlusion_query_set: None,
});
render_pass.set_pipeline(&shared.hdr_blit_pipeline);
render_pass.set_bind_group(0, hdr_bind_group, &[]);
render_pass.draw(0..3, 0..1); // Full-screen triangle (3 vertices)
}
queue.submit(Some(encoder.finish()));
}
} else {
// Legacy mode: Direct single-scene rendering
let render_params = vello::RenderParams {
base_color: vello::peniko::Color::from_rgb8(45, 45, 48), // Dark background
width,
height,
antialiasing_method: vello::AaConfig::Msaa16,
};
if let Ok(mut renderer) = shared.renderer.lock() {
renderer
.render_to_texture(device, queue, &scene, texture_view, &render_params)
.ok();
}
}
}