From c8a5cbfc89cb22b912d31c83c75a785f7f35e1e7 Mon Sep 17 00:00:00 2001
From: Skyler Lehmkuhl
Date: Mon, 8 Dec 2025 10:20:50 -0500
Subject: [PATCH] fix color space for effects and enable them in video export

---
 .../src/actions/move_clip_instances.rs        |   8 +-
 .../src/actions/trim_clip_instances.rs        |  10 +-
 .../src/gpu/color_convert.rs                  | 215 +++
 .../lightningbeam-core/src/gpu/compositor.rs  |  26 +-
 .../lightningbeam-core/src/gpu/mod.rs         |   3 +
 .../lightningbeam-editor/src/export/mod.rs    |  20 +-
 .../src/export/video_exporter.rs              | 596 +++++++++++++++++-
 .../lightningbeam-editor/src/panes/stage.rs   |  67 +-
 8 files changed, 894 insertions(+), 51 deletions(-)
 create mode 100644 lightningbeam-ui/lightningbeam-core/src/gpu/color_convert.rs

diff --git a/lightningbeam-ui/lightningbeam-core/src/actions/move_clip_instances.rs b/lightningbeam-ui/lightningbeam-core/src/actions/move_clip_instances.rs
index c61ea96..b9ff11f 100644
--- a/lightningbeam-ui/lightningbeam-core/src/actions/move_clip_instances.rs
+++ b/lightningbeam-ui/lightningbeam-core/src/actions/move_clip_instances.rs
@@ -55,7 +55,7 @@ impl Action for MoveClipInstancesAction {
                 AnyLayer::Vector(vl) => &vl.clip_instances,
                 AnyLayer::Audio(al) => &al.clip_instances,
                 AnyLayer::Video(vl) => &vl.clip_instances,
-                AnyLayer::Effect(_) => continue, // Effect layers don't have clip instances
+                AnyLayer::Effect(el) => &el.clip_instances,
             };
 
             if let Some(instance) = clip_instances.iter().find(|ci| ci.id == *member_instance_id) {
@@ -94,7 +94,7 @@ impl Action for MoveClipInstancesAction {
                 AnyLayer::Audio(al) => &al.clip_instances,
                 AnyLayer::Video(vl) => &vl.clip_instances,
                 AnyLayer::Vector(vl) => &vl.clip_instances,
-                AnyLayer::Effect(_) => continue, // Effect layers don't have clip instances
+                AnyLayer::Effect(el) => &el.clip_instances,
             };
 
             let instance = clip_instances.iter()
@@ -141,7 +141,7 @@ impl Action for MoveClipInstancesAction {
                 AnyLayer::Vector(vl) => &mut vl.clip_instances,
                 AnyLayer::Audio(al) => &mut al.clip_instances,
                 AnyLayer::Video(vl) => &mut vl.clip_instances,
-                AnyLayer::Effect(_) => continue, // Effect layers don't have clip instances
+                AnyLayer::Effect(el) => &mut el.clip_instances,
             };
 
             // Update timeline_start for each clip instance
@@ -166,7 +166,7 @@ impl Action for MoveClipInstancesAction {
                 AnyLayer::Vector(vl) => &mut vl.clip_instances,
                 AnyLayer::Audio(al) => &mut al.clip_instances,
                 AnyLayer::Video(vl) => &mut vl.clip_instances,
-                AnyLayer::Effect(_) => continue, // Effect layers don't have clip instances
+                AnyLayer::Effect(el) => &mut el.clip_instances,
             };
 
             // Restore original timeline_start for each clip instance

diff --git a/lightningbeam-ui/lightningbeam-core/src/actions/trim_clip_instances.rs b/lightningbeam-ui/lightningbeam-core/src/actions/trim_clip_instances.rs
index f750e68..3aee14c 100644
--- a/lightningbeam-ui/lightningbeam-core/src/actions/trim_clip_instances.rs
+++ b/lightningbeam-ui/lightningbeam-core/src/actions/trim_clip_instances.rs
@@ -98,7 +98,7 @@ impl Action for TrimClipInstancesAction {
                 AnyLayer::Vector(vl) => &vl.clip_instances,
                 AnyLayer::Audio(al) => &al.clip_instances,
                 AnyLayer::Video(vl) => &vl.clip_instances,
-                AnyLayer::Effect(_) => continue,
+                AnyLayer::Effect(el) => &el.clip_instances,
             };
 
             if let Some(instance) = clip_instances.iter().find(|ci| ci.id == *member_instance_id) {
@@ -133,7 +133,7 @@ impl Action for TrimClipInstancesAction {
                 AnyLayer::Vector(vl) => &vl.clip_instances,
                 AnyLayer::Audio(al) => &al.clip_instances,
                 AnyLayer::Video(vl) => &vl.clip_instances,
-                AnyLayer::Effect(_) => continue,
+                AnyLayer::Effect(el) => &el.clip_instances,
             };
 
             if let Some(instance) = clip_instances.iter().find(|ci| ci.id == *member_instance_id) {
@@ -175,7 +175,7 @@ impl Action for TrimClipInstancesAction {
                 AnyLayer::Audio(al) => &al.clip_instances,
                 AnyLayer::Video(vl) => &vl.clip_instances,
                 AnyLayer::Vector(vl) => &vl.clip_instances,
-                AnyLayer::Effect(_) => continue, // Effect layers don't have clip instances
+                AnyLayer::Effect(el) => &el.clip_instances,
             };
 
             let instance = clip_instances.iter()
@@ -266,7 +266,7 @@ impl Action for TrimClipInstancesAction {
                 AnyLayer::Vector(vl) => &mut vl.clip_instances,
                 AnyLayer::Audio(al) => &mut al.clip_instances,
                 AnyLayer::Video(vl) => &mut vl.clip_instances,
-                AnyLayer::Effect(_) => continue, // Effect layers don't have clip instances
+                AnyLayer::Effect(el) => &mut el.clip_instances,
             };
 
             // Apply trims
@@ -304,7 +304,7 @@ impl Action for TrimClipInstancesAction {
                 AnyLayer::Vector(vl) => &mut vl.clip_instances,
                 AnyLayer::Audio(al) => &mut al.clip_instances,
                 AnyLayer::Video(vl) => &mut vl.clip_instances,
-                AnyLayer::Effect(_) => continue, // Effect layers don't have clip instances
+                AnyLayer::Effect(el) => &mut el.clip_instances,
             };
 
             // Restore original trim values

diff --git a/lightningbeam-ui/lightningbeam-core/src/gpu/color_convert.rs b/lightningbeam-ui/lightningbeam-core/src/gpu/color_convert.rs
new file mode 100644
index 0000000..42cca3d
--- /dev/null
+++ b/lightningbeam-ui/lightningbeam-core/src/gpu/color_convert.rs
@@ -0,0 +1,215 @@
+//! Color space conversion pipelines for GPU rendering
+//!
+//! Provides sRGB ↔ linear color space conversion passes for the HDR compositing pipeline.
+//! These are used to convert Vello's sRGB output to linear HDR for compositing,
+//! and to convert the final HDR result back to sRGB for display.
+
+use super::HDR_FORMAT;
+
+/// GPU pipeline for sRGB to linear color space conversion
+///
+/// Converts Rgba8Srgb textures to Rgba16Float linear textures.
+/// Used after Vello rendering to prepare layers for HDR compositing.
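+///
+/// A minimal usage sketch (`device`, `queue`, `src_view`, and `dst_view` are
+/// assumed to already exist in the caller; this is not a complete program):
+///
+/// ```ignore
+/// let converter = SrgbToLinearConverter::new(&device);
+/// let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
+/// converter.convert(&device, &mut encoder, &src_view, &dst_view);
+/// queue.submit(Some(encoder.finish()));
+/// ```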
+pub struct SrgbToLinearConverter {
+    pipeline: wgpu::RenderPipeline,
+    bind_group_layout: wgpu::BindGroupLayout,
+    sampler: wgpu::Sampler,
+}
+
+impl SrgbToLinearConverter {
+    /// Create a new sRGB to linear converter
+    pub fn new(device: &wgpu::Device) -> Self {
+        // Create bind group layout
+        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+            label: Some("srgb_to_linear_bind_group_layout"),
+            entries: &[
+                // Source sRGB texture
+                wgpu::BindGroupLayoutEntry {
+                    binding: 0,
+                    visibility: wgpu::ShaderStages::FRAGMENT,
+                    ty: wgpu::BindingType::Texture {
+                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
+                        view_dimension: wgpu::TextureViewDimension::D2,
+                        multisampled: false,
+                    },
+                    count: None,
+                },
+                // Sampler
+                wgpu::BindGroupLayoutEntry {
+                    binding: 1,
+                    visibility: wgpu::ShaderStages::FRAGMENT,
+                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
+                    count: None,
+                },
+            ],
+        });
+
+        // Create pipeline layout
+        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+            label: Some("srgb_to_linear_pipeline_layout"),
+            bind_group_layouts: &[&bind_group_layout],
+            push_constant_ranges: &[],
+        });
+
+        // Create shader module
+        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
+            label: Some("srgb_to_linear_shader"),
+            source: wgpu::ShaderSource::Wgsl(SRGB_TO_LINEAR_SHADER.into()),
+        });
+
+        // Create render pipeline - outputs to HDR format
+        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+            label: Some("srgb_to_linear_pipeline"),
+            layout: Some(&pipeline_layout),
+            vertex: wgpu::VertexState {
+                module: &shader,
+                entry_point: Some("vs_main"),
+                buffers: &[],
+                compilation_options: wgpu::PipelineCompilationOptions::default(),
+            },
+            fragment: Some(wgpu::FragmentState {
+                module: &shader,
+                entry_point: Some("fs_main"),
+                targets: &[Some(wgpu::ColorTargetState {
+                    format: HDR_FORMAT,
+                    blend: None, // No blending - direct write
+                    write_mask: wgpu::ColorWrites::ALL,
+                })],
+                compilation_options: wgpu::PipelineCompilationOptions::default(),
+            }),
+            primitive: wgpu::PrimitiveState {
+                topology: wgpu::PrimitiveTopology::TriangleStrip,
+                strip_index_format: None,
+                front_face: wgpu::FrontFace::Ccw,
+                cull_mode: None,
+                polygon_mode: wgpu::PolygonMode::Fill,
+                unclipped_depth: false,
+                conservative: false,
+            },
+            depth_stencil: None,
+            multisample: wgpu::MultisampleState::default(),
+            multiview: None,
+            cache: None,
+        });
+
+        // Create sampler
+        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
+            label: Some("srgb_to_linear_sampler"),
+            address_mode_u: wgpu::AddressMode::ClampToEdge,
+            address_mode_v: wgpu::AddressMode::ClampToEdge,
+            address_mode_w: wgpu::AddressMode::ClampToEdge,
+            mag_filter: wgpu::FilterMode::Linear,
+            min_filter: wgpu::FilterMode::Linear,
+            mipmap_filter: wgpu::FilterMode::Nearest,
+            ..Default::default()
+        });
+
+        Self {
+            pipeline,
+            bind_group_layout,
+            sampler,
+        }
+    }
+
+    /// Convert an sRGB texture to linear HDR
+    ///
+    /// Reads from `source_view` (sRGB) and writes to `dest_view` (HDR linear).
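+    ///
+    /// The pass is only recorded into `encoder`; nothing executes until the
+    /// caller submits the encoder to a queue.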
+    pub fn convert(
+        &self,
+        device: &wgpu::Device,
+        encoder: &mut wgpu::CommandEncoder,
+        source_view: &wgpu::TextureView,
+        dest_view: &wgpu::TextureView,
+    ) {
+        // Create bind group for this conversion
+        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+            label: Some("srgb_to_linear_bind_group"),
+            layout: &self.bind_group_layout,
+            entries: &[
+                wgpu::BindGroupEntry {
+                    binding: 0,
+                    resource: wgpu::BindingResource::TextureView(source_view),
+                },
+                wgpu::BindGroupEntry {
+                    binding: 1,
+                    resource: wgpu::BindingResource::Sampler(&self.sampler),
+                },
+            ],
+        });
+
+        // Render pass
+        let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+            label: Some("srgb_to_linear_pass"),
+            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
+                view: dest_view,
+                resolve_target: None,
+                ops: wgpu::Operations {
+                    load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
+                    store: wgpu::StoreOp::Store,
+                },
+            })],
+            depth_stencil_attachment: None,
+            occlusion_query_set: None,
+            timestamp_writes: None,
+        });
+
+        render_pass.set_pipeline(&self.pipeline);
+        render_pass.set_bind_group(0, &bind_group, &[]);
+        render_pass.draw(0..4, 0..1);
+    }
+}
+
+/// WGSL shader for sRGB to linear conversion
+const SRGB_TO_LINEAR_SHADER: &str = r#"
+// sRGB to Linear color space conversion shader
+
+@group(0) @binding(0) var source_tex: texture_2d<f32>;
+@group(0) @binding(1) var source_sampler: sampler;
+
+struct VertexOutput {
+    @builtin(position) position: vec4<f32>,
+    @location(0) uv: vec2<f32>,
+}
+
+// Fullscreen triangle strip
+@vertex
+fn vs_main(@builtin(vertex_index) vertex_index: u32) -> VertexOutput {
+    var out: VertexOutput;
+
+    let x = f32((vertex_index & 1u) << 1u);
+    let y = f32(vertex_index & 2u);
+
+    out.position = vec4<f32>(x * 2.0 - 1.0, 1.0 - y * 2.0, 0.0, 1.0);
+    out.uv = vec2<f32>(x, y);
+
+    return out;
+}
+
+// sRGB to linear color space conversion (per channel)
+fn srgb_to_linear_channel(c: f32) -> f32 {
+    return select(
+        pow((c + 0.055) / 1.055, 2.4),
+        c / 12.92,
+        c <= 0.04045
+    );
+}
+
+fn srgb_to_linear(color: vec3<f32>) -> vec3<f32> {
+    return vec3<f32>(
+        srgb_to_linear_channel(color.r),
+        srgb_to_linear_channel(color.g),
+        srgb_to_linear_channel(color.b)
+    );
+}
+
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
+    let src = textureSample(source_tex, source_sampler, in.uv);
+
+    // Convert sRGB to linear
+    let linear_rgb = srgb_to_linear(src.rgb);
+
+    // Alpha stays unchanged
+    return vec4<f32>(linear_rgb, src.a);
+}
+"#;

diff --git a/lightningbeam-ui/lightningbeam-core/src/gpu/compositor.rs b/lightningbeam-ui/lightningbeam-core/src/gpu/compositor.rs
index d313a4f..be8ab9d 100644
--- a/lightningbeam-ui/lightningbeam-core/src/gpu/compositor.rs
+++ b/lightningbeam-ui/lightningbeam-core/src/gpu/compositor.rs
@@ -406,25 +406,9 @@ fn vs_main(@builtin(vertex_index) vertex_index: u32) -> VertexOutput {
     return out;
 }
 
-// sRGB to linear color space conversion
-// Vello outputs sRGB-encoded colors, we need linear for correct HDR blending
-fn srgb_to_linear_channel(c: f32) -> f32 {
-    return select(
-        pow((c + 0.055) / 1.055, 2.4),
-        c / 12.92,
-        c <= 0.04045
-    );
-}
-
-fn srgb_to_linear(color: vec3<f32>) -> vec3<f32> {
-    return vec3<f32>(
-        srgb_to_linear_channel(color.r),
-        srgb_to_linear_channel(color.g),
-        srgb_to_linear_channel(color.b)
-    );
-}
-
 // Blend mode implementations
+// NOTE: All inputs are expected to be in linear HDR color space.
+// sRGB to linear conversion happens in a separate pass before compositing.
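+// For reference, compositing premultiplied sources in linear space uses the
+// standard "over" operator: out_rgb = src_rgb + dst_rgb * (1.0 - src_alpha),
+// where src_rgb is already multiplied by src_alpha.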
 
 fn blend_normal(src: vec3<f32>, dst: vec3<f32>) -> vec3<f32> {
     return src;
 }
@@ -537,13 +521,11 @@ fn apply_blend(src: vec3<f32>, dst: vec3<f32>, mode: u32) -> vec3<f32> {
 fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
     let src = textureSample(source_tex, source_sampler, in.uv);
 
-    // Convert Vello's sRGB output to linear for correct HDR blending
-    let linear_rgb = srgb_to_linear(src.rgb);
-
+    // Input is already in linear HDR color space (converted in separate pass)
     // Apply opacity
     let src_alpha = src.a * uniforms.opacity;
 
     // Output premultiplied alpha in linear color space
-    return vec4<f32>(linear_rgb * src_alpha, src_alpha);
+    return vec4<f32>(src.rgb * src_alpha, src_alpha);
 }
 "#;

diff --git a/lightningbeam-ui/lightningbeam-core/src/gpu/mod.rs b/lightningbeam-ui/lightningbeam-core/src/gpu/mod.rs
index 419b741..681f245 100644
--- a/lightningbeam-ui/lightningbeam-core/src/gpu/mod.rs
+++ b/lightningbeam-ui/lightningbeam-core/src/gpu/mod.rs
@@ -4,13 +4,16 @@
 // - Buffer pooling for efficient render target management
 // - Compositor for layer blending with proper opacity
 // - Effect pipeline for GPU shader effects
+// - Color space conversion (sRGB ↔ linear)
 
 pub mod buffer_pool;
+pub mod color_convert;
 pub mod compositor;
 pub mod effect_processor;
 
 // Re-export commonly used types
 pub use buffer_pool::{BufferHandle, BufferPool, BufferSpec, BufferFormat};
+pub use color_convert::SrgbToLinearConverter;
 pub use compositor::{Compositor, CompositorLayer, BlendMode};
 pub use effect_processor::{EffectProcessor, EffectUniforms};

diff --git a/lightningbeam-ui/lightningbeam-editor/src/export/mod.rs b/lightningbeam-ui/lightningbeam-editor/src/export/mod.rs
index b6b2d5c..a5de980 100644
--- a/lightningbeam-ui/lightningbeam-editor/src/export/mod.rs
+++ b/lightningbeam-ui/lightningbeam-editor/src/export/mod.rs
@@ -42,6 +42,8 @@ pub struct VideoExportState {
     height: u32,
     /// Channel to send rendered frames to encoder thread
     frame_tx: Option<Sender<VideoFrameMessage>>,
+    /// HDR GPU resources for compositing pipeline (effects, color conversion)
+    gpu_resources: Option<video_exporter::ExportGpuResources>,
 }
 
 /// Export orchestrator that manages the export process
@@ -619,6 +621,7 @@ impl ExportOrchestrator {
         self.thread_handle = Some(handle);
 
         // Initialize video export state
+        // GPU resources will be initialized lazily on first frame (needs device)
         self.video_state = Some(VideoExportState {
             current_frame: 0,
             total_frames,
@@ -628,6 +631,7 @@
             width,
             height,
             frame_tx: Some(frame_tx),
+            gpu_resources: None,
         });
 
         println!("🎬 [VIDEO EXPORT] Encoder thread spawned, ready for frames");
@@ -741,6 +745,7 @@ impl ExportOrchestrator {
         });
 
         // Initialize video export state for incremental rendering
+        // GPU resources will be initialized lazily on first frame (needs device)
         self.video_state = Some(VideoExportState {
             current_frame: 0,
             total_frames,
@@ -750,6 +755,7 @@
             width: video_width,
             height: video_height,
             frame_tx: Some(frame_tx),
+            gpu_resources: None,
         });
 
         // Initialize parallel export state
@@ -800,6 +806,8 @@ impl ExportOrchestrator {
             if let Some(tx) = state.frame_tx.take() {
                 tx.send(VideoFrameMessage::Done).ok();
             }
+            // Clean up GPU resources
+            state.gpu_resources = None;
             return Ok(false);
         }
 
@@ -810,9 +818,16 @@ impl ExportOrchestrator {
         let width = state.width;
         let height = state.height;
 
-        // Render frame to RGBA buffer
+        // Initialize GPU resources on first frame (needs device)
+        if state.gpu_resources.is_none() {
+            println!("🎬 [VIDEO EXPORT] Initializing HDR GPU resources for {}x{}", width, height);
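+            // (Sketch: Option::get_or_insert_with expresses the same lazy init,
+            // e.g. state.gpu_resources.get_or_insert_with(
+            //     || video_exporter::ExportGpuResources::new(device, width, height));)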
+            state.gpu_resources = Some(video_exporter::ExportGpuResources::new(device, width, height));
+        }
+
+        // Render frame to RGBA buffer using HDR pipeline (with effects)
         let mut rgba_buffer = vec![0u8; (width * height * 4) as usize];
-        video_exporter::render_frame_to_rgba(
+        let gpu_resources = state.gpu_resources.as_mut().unwrap();
+        video_exporter::render_frame_to_rgba_hdr(
             document,
             timestamp,
             width,
@@ -822,6 +837,7 @@
             renderer,
             image_cache,
             video_manager,
+            gpu_resources,
             &mut rgba_buffer,
         )?;

diff --git a/lightningbeam-ui/lightningbeam-editor/src/export/video_exporter.rs b/lightningbeam-ui/lightningbeam-editor/src/export/video_exporter.rs
index ac90284..6030a47 100644
--- a/lightningbeam-ui/lightningbeam-editor/src/export/video_exporter.rs
+++ b/lightningbeam-ui/lightningbeam-editor/src/export/video_exporter.rs
@@ -8,8 +8,12 @@ use ffmpeg_next as ffmpeg;
 use std::sync::Arc;
 
 use lightningbeam_core::document::Document;
-use lightningbeam_core::renderer::ImageCache;
+use lightningbeam_core::renderer::{ImageCache, render_document_for_compositing, RenderedLayerType};
 use lightningbeam_core::video::VideoManager;
+use lightningbeam_core::gpu::{
+    BufferPool, BufferSpec, BufferFormat, Compositor, CompositorLayer,
+    SrgbToLinearConverter, EffectProcessor, HDR_FORMAT,
+};
 
 /// Reusable frame buffers to avoid allocations
 struct FrameBuffers {
@@ -39,6 +43,227 @@ impl FrameBuffers {
     }
 }
 
+/// GPU resources for HDR export pipeline
+///
+/// This mirrors the resources in stage.rs SharedVelloResources but is owned
+/// by the export system to avoid lifetime/locking issues during export.
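+///
+/// A minimal construction sketch (`device` is assumed to be a live
+/// `wgpu::Device`; the sizes are illustrative):
+///
+/// ```ignore
+/// let mut gpu = ExportGpuResources::new(&device, 1920, 1080);
+/// // recreate the HDR target if the export size changes:
+/// gpu.resize(&device, 1280, 720);
+/// ```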
Some("linear_to_srgb_bind_group_layout"), + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + view_dimension: wgpu::TextureViewDimension::D2, + multisampled: false, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + }); + + let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: Some("linear_to_srgb_pipeline_layout"), + bind_group_layouts: &[&linear_to_srgb_bind_group_layout], + push_constant_ranges: &[], + }); + + let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor { + label: Some("linear_to_srgb_shader"), + source: wgpu::ShaderSource::Wgsl(LINEAR_TO_SRGB_SHADER.into()), + }); + + let linear_to_srgb_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: Some("linear_to_srgb_pipeline"), + layout: Some(&pipeline_layout), + vertex: wgpu::VertexState { + module: &shader, + entry_point: Some("vs_main"), + buffers: &[], + compilation_options: wgpu::PipelineCompilationOptions::default(), + }, + fragment: Some(wgpu::FragmentState { + module: &shader, + entry_point: Some("fs_main"), + targets: &[Some(wgpu::ColorTargetState { + format: wgpu::TextureFormat::Rgba8Unorm, + blend: None, + write_mask: wgpu::ColorWrites::ALL, + })], + compilation_options: wgpu::PipelineCompilationOptions::default(), + }), + primitive: wgpu::PrimitiveState { + topology: wgpu::PrimitiveTopology::TriangleStrip, + strip_index_format: None, + front_face: wgpu::FrontFace::Ccw, + cull_mode: None, + polygon_mode: wgpu::PolygonMode::Fill, + unclipped_depth: false, + conservative: false, + }, + depth_stencil: None, + multisample: wgpu::MultisampleState::default(), + multiview: None, + cache: None, + }); + + let linear_to_srgb_sampler = device.create_sampler(&wgpu::SamplerDescriptor { + label: Some("linear_to_srgb_sampler"), + address_mode_u: wgpu::AddressMode::ClampToEdge, + address_mode_v: wgpu::AddressMode::ClampToEdge, + address_mode_w: wgpu::AddressMode::ClampToEdge, + mag_filter: wgpu::FilterMode::Linear, + min_filter: wgpu::FilterMode::Linear, + mipmap_filter: wgpu::FilterMode::Nearest, + ..Default::default() + }); + + Self { + buffer_pool, + compositor, + srgb_to_linear, + effect_processor, + hdr_texture, + hdr_texture_view, + linear_to_srgb_pipeline, + linear_to_srgb_bind_group_layout, + linear_to_srgb_sampler, + } + } + + /// Resize the HDR texture if dimensions changed + pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) { + self.hdr_texture = device.create_texture(&wgpu::TextureDescriptor { + label: Some("export_hdr_texture"), + size: wgpu::Extent3d { + width, + height, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: HDR_FORMAT, + usage: wgpu::TextureUsages::RENDER_ATTACHMENT + | wgpu::TextureUsages::TEXTURE_BINDING + | wgpu::TextureUsages::COPY_SRC, + view_formats: &[], + }); + self.hdr_texture_view = self.hdr_texture.create_view(&wgpu::TextureViewDescriptor::default()); + } +} + +/// WGSL shader for linear to sRGB conversion (for final export output) +const LINEAR_TO_SRGB_SHADER: &str = r#" +// Linear to sRGB color space conversion shader + +@group(0) @binding(0) var source_tex: texture_2d; +@group(0) @binding(1) var 
+
+/// WGSL shader for linear to sRGB conversion (for final export output)
+const LINEAR_TO_SRGB_SHADER: &str = r#"
+// Linear to sRGB color space conversion shader
+
+@group(0) @binding(0) var source_tex: texture_2d<f32>;
+@group(0) @binding(1) var source_sampler: sampler;
+
+struct VertexOutput {
+    @builtin(position) position: vec4<f32>,
+    @location(0) uv: vec2<f32>,
+}
+
+// Fullscreen triangle strip
+@vertex
+fn vs_main(@builtin(vertex_index) vertex_index: u32) -> VertexOutput {
+    var out: VertexOutput;
+
+    let x = f32((vertex_index & 1u) << 1u);
+    let y = f32(vertex_index & 2u);
+
+    out.position = vec4<f32>(x * 2.0 - 1.0, 1.0 - y * 2.0, 0.0, 1.0);
+    out.uv = vec2<f32>(x, y);
+
+    return out;
+}
+
+// Linear to sRGB color space conversion (per channel)
+fn linear_to_srgb_channel(c: f32) -> f32 {
+    return select(
+        1.055 * pow(c, 1.0 / 2.4) - 0.055,
+        c * 12.92,
+        c <= 0.0031308
+    );
+}
+
+fn linear_to_srgb(color: vec3<f32>) -> vec3<f32> {
+    return vec3<f32>(
+        linear_to_srgb_channel(color.r),
+        linear_to_srgb_channel(color.g),
+        linear_to_srgb_channel(color.b)
+    );
+}
+
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
+    let src = textureSample(source_tex, source_sampler, in.uv);
+
+    // Convert linear HDR to sRGB
+    let srgb = linear_to_srgb(src.rgb);
+
+    // Alpha stays unchanged
+    return vec4<f32>(srgb, src.a);
+}
+"#;
+
 /// Convert RGBA8 pixels to YUV420p format using BT.709 color space
 ///
 /// # Arguments
@@ -419,6 +644,375 @@ pub fn render_frame_to_rgba(
     Ok(())
 }
 
+/// Render a document frame using the HDR compositing pipeline with effects
+///
+/// This function uses the same rendering pipeline as the stage preview,
+/// ensuring effects are applied correctly during export.
+///
+/// # Arguments
+/// * `document` - Document to render (current_time will be modified)
+/// * `timestamp` - Time in seconds to render at
+/// * `width` - Frame width in pixels
+/// * `height` - Frame height in pixels
+/// * `device` - wgpu device
+/// * `queue` - wgpu queue
+/// * `renderer` - Vello renderer
+/// * `image_cache` - Image cache for rendering
+/// * `video_manager` - Video manager for video clips
+/// * `gpu_resources` - HDR GPU resources for compositing
+/// * `rgba_buffer` - Output buffer for RGBA pixels (must be width * height * 4 bytes)
+///
+/// # Returns
+/// Ok(()) on success, Err with message on failure
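+///
+/// A per-frame call sketch (all handles are assumed to already exist in the
+/// caller; `frame_idx` and `fps` are hypothetical):
+///
+/// ```ignore
+/// let mut rgba = vec![0u8; (width * height * 4) as usize];
+/// render_frame_to_rgba_hdr(
+///     &mut document, frame_idx as f64 / fps, width, height,
+///     &device, &queue, &mut renderer, &mut image_cache,
+///     &video_manager, &mut gpu_resources, &mut rgba,
+/// )?;
+/// ```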
+pub fn render_frame_to_rgba_hdr(
+    document: &mut Document,
+    timestamp: f64,
+    width: u32,
+    height: u32,
+    device: &wgpu::Device,
+    queue: &wgpu::Queue,
+    renderer: &mut vello::Renderer,
+    image_cache: &mut ImageCache,
+    video_manager: &Arc<Mutex<VideoManager>>,
+    gpu_resources: &mut ExportGpuResources,
+    rgba_buffer: &mut [u8],
+) -> Result<(), String> {
+    use vello::kurbo::Affine;
+
+    // Set document time to the frame timestamp
+    document.current_time = timestamp;
+
+    // Use identity transform for export (document coordinates = pixel coordinates)
+    let base_transform = Affine::IDENTITY;
+
+    // Render document for compositing (returns per-layer scenes)
+    let composite_result = render_document_for_compositing(
+        document,
+        base_transform,
+        image_cache,
+        video_manager,
+    );
+
+    // Buffer specs for layer rendering
+    let layer_spec = BufferSpec::new(width, height, BufferFormat::Rgba8Srgb);
+    let hdr_spec = BufferSpec::new(width, height, BufferFormat::Rgba16Float);
+
+    // Render parameters for Vello (transparent background for layers)
+    let layer_render_params = vello::RenderParams {
+        base_color: vello::peniko::Color::TRANSPARENT,
+        width,
+        height,
+        antialiasing_method: vello::AaConfig::Area,
+    };
+
+    // First, render background and composite it
+    let bg_srgb_handle = gpu_resources.buffer_pool.acquire(device, layer_spec);
+    let bg_hdr_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
+
+    if let (Some(bg_srgb_view), Some(bg_hdr_view)) = (
+        gpu_resources.buffer_pool.get_view(bg_srgb_handle),
+        gpu_resources.buffer_pool.get_view(bg_hdr_handle),
+    ) {
+        // Render background scene
+        renderer.render_to_texture(device, queue, &composite_result.background, bg_srgb_view, &layer_render_params)
+            .map_err(|e| format!("Failed to render background: {}", e))?;
+
+        // Convert sRGB to linear HDR
+        let mut convert_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+            label: Some("export_bg_srgb_to_linear_encoder"),
+        });
+        gpu_resources.srgb_to_linear.convert(device, &mut convert_encoder, bg_srgb_view, bg_hdr_view);
+        queue.submit(Some(convert_encoder.finish()));
+
+        // Composite background onto HDR texture (first layer, clears to black for export)
+        let bg_compositor_layer = CompositorLayer::normal(bg_hdr_handle, 1.0);
+        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+            label: Some("export_bg_composite_encoder"),
+        });
+        // Clear to black for export (unlike stage preview which has gray background)
+        gpu_resources.compositor.composite(
+            device,
+            queue,
+            &mut encoder,
+            &[bg_compositor_layer],
+            &gpu_resources.buffer_pool,
+            &gpu_resources.hdr_texture_view,
+            Some([0.0, 0.0, 0.0, 1.0]),
+        );
+        queue.submit(Some(encoder.finish()));
+    }
+    gpu_resources.buffer_pool.release(bg_srgb_handle);
+    gpu_resources.buffer_pool.release(bg_hdr_handle);
+
+    // Now render and composite each layer incrementally
+    for rendered_layer in &composite_result.layers {
+        if !rendered_layer.has_content {
+            continue;
+        }
+
+        match &rendered_layer.layer_type {
+            RenderedLayerType::Content => {
+                // Regular content layer - render to sRGB, convert to linear, then composite
+                let srgb_handle = gpu_resources.buffer_pool.acquire(device, layer_spec);
+                let hdr_layer_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
+
+                if let (Some(srgb_view), Some(hdr_layer_view)) = (
+                    gpu_resources.buffer_pool.get_view(srgb_handle),
+                    gpu_resources.buffer_pool.get_view(hdr_layer_handle),
+                ) {
+                    // Render layer scene to sRGB buffer
+                    renderer.render_to_texture(device, queue, &rendered_layer.scene, srgb_view, &layer_render_params)
+                        .map_err(|e| format!("Failed to render layer: {}", e))?;
+
+                    // Convert sRGB to linear HDR
+                    let mut convert_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+                        label: Some("export_layer_srgb_to_linear_encoder"),
+                    });
+                    gpu_resources.srgb_to_linear.convert(device, &mut convert_encoder, srgb_view, hdr_layer_view);
+                    queue.submit(Some(convert_encoder.finish()));
+
+                    // Composite this layer onto the HDR accumulator with its opacity
+                    let compositor_layer = CompositorLayer::new(
+                        hdr_layer_handle,
+                        rendered_layer.opacity,
+                        rendered_layer.blend_mode,
+                    );
+
+                    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+                        label: Some("export_layer_composite_encoder"),
+                    });
+                    gpu_resources.compositor.composite(
+                        device,
+                        queue,
+                        &mut encoder,
+                        &[compositor_layer],
+                        &gpu_resources.buffer_pool,
+                        &gpu_resources.hdr_texture_view,
+                        None, // Don't clear - blend onto existing content
+                    );
+                    queue.submit(Some(encoder.finish()));
+                }
+
+                gpu_resources.buffer_pool.release(srgb_handle);
+                gpu_resources.buffer_pool.release(hdr_layer_handle);
+            }
+            RenderedLayerType::Effect { effect_instances } => {
+                // Effect layer - apply effects to the current HDR accumulator
+                let current_time = document.current_time;
+
+                for effect_instance in effect_instances {
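+                    // Each effect samples the accumulator and writes to a scratch
+                    // buffer (a texture cannot be sampled and rendered to in the
+                    // same pass), then the result is composited back over a
+                    // cleared accumulator - a simple ping-pong.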
+                    // Get effect definition from document
+                    let Some(effect_def) = document.get_effect_definition(&effect_instance.clip_id) else {
+                        continue;
+                    };
+
+                    // Compile effect if needed
+                    if !gpu_resources.effect_processor.is_compiled(&effect_def.id) {
+                        let success = gpu_resources.effect_processor.compile_effect(device, effect_def);
+                        if !success {
+                            eprintln!("Failed to compile effect: {}", effect_def.name);
+                            continue;
+                        }
+                    }
+
+                    // Create EffectInstance from ClipInstance for the processor
+                    let effect_inst = lightningbeam_core::effect::EffectInstance::new(
+                        effect_def,
+                        effect_instance.timeline_start,
+                        effect_instance.timeline_start + effect_instance.effective_duration(lightningbeam_core::effect::EFFECT_DURATION),
+                    );
+
+                    // Acquire temp buffer for effect output (HDR format)
+                    let effect_output_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
+
+                    if let Some(effect_output_view) = gpu_resources.buffer_pool.get_view(effect_output_handle) {
+                        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+                            label: Some("export_effect_encoder"),
+                        });
+
+                        // Apply effect: HDR accumulator → effect output buffer
+                        let applied = gpu_resources.effect_processor.apply_effect(
+                            device,
+                            queue,
+                            &mut encoder,
+                            effect_def,
+                            &effect_inst,
+                            &gpu_resources.hdr_texture_view,
+                            effect_output_view,
+                            width,
+                            height,
+                            current_time,
+                        );
+
+                        if applied {
+                            queue.submit(Some(encoder.finish()));
+
+                            // Copy effect output back to HDR accumulator
+                            let mut copy_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+                                label: Some("export_effect_copy_encoder"),
+                            });
+
+                            // Use compositor to copy (replacing content)
+                            let effect_layer = CompositorLayer::normal(
+                                effect_output_handle,
+                                rendered_layer.opacity, // Apply effect layer opacity
+                            );
+                            gpu_resources.compositor.composite(
+                                device,
+                                queue,
+                                &mut copy_encoder,
+                                &[effect_layer],
+                                &gpu_resources.buffer_pool,
+                                &gpu_resources.hdr_texture_view,
+                                Some([0.0, 0.0, 0.0, 0.0]), // Clear with transparent (we're replacing)
+                            );
+                            queue.submit(Some(copy_encoder.finish()));
+                        }
+                    }
+
+                    gpu_resources.buffer_pool.release(effect_output_handle);
+                }
+            }
+        }
+    }
+
+    // Advance frame counter for buffer cleanup
+    gpu_resources.buffer_pool.next_frame();
+
+    // Create output texture for final sRGB output
+    let output_texture = device.create_texture(&wgpu::TextureDescriptor {
+        label: Some("export_output_texture"),
+        size: wgpu::Extent3d {
+            width,
+            height,
+            depth_or_array_layers: 1,
+        },
+        mip_level_count: 1,
+        sample_count: 1,
+        dimension: wgpu::TextureDimension::D2,
+        format: wgpu::TextureFormat::Rgba8Unorm,
+        usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
+        view_formats: &[],
+    });
+    let output_view = output_texture.create_view(&wgpu::TextureViewDescriptor::default());
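+
+    // Note: the target is Rgba8Unorm rather than Rgba8UnormSrgb; the blit
+    // shader already encodes sRGB manually, so an sRGB-format target would
+    // apply the encoding twice.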
+
+    // Convert HDR to sRGB for output
+    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
+        label: Some("export_linear_to_srgb_bind_group"),
+        layout: &gpu_resources.linear_to_srgb_bind_group_layout,
+        entries: &[
+            wgpu::BindGroupEntry {
+                binding: 0,
+                resource: wgpu::BindingResource::TextureView(&gpu_resources.hdr_texture_view),
+            },
+            wgpu::BindGroupEntry {
+                binding: 1,
+                resource: wgpu::BindingResource::Sampler(&gpu_resources.linear_to_srgb_sampler),
+            },
+        ],
+    });
+
+    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+        label: Some("export_linear_to_srgb_encoder"),
+    });
+
+    {
+        let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
+            label: Some("export_linear_to_srgb_pass"),
+            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
+                view: &output_view,
+                resolve_target: None,
+                ops: wgpu::Operations {
+                    load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
+                    store: wgpu::StoreOp::Store,
+                },
+            })],
+            depth_stencil_attachment: None,
+            occlusion_query_set: None,
+            timestamp_writes: None,
+        });
+
+        render_pass.set_pipeline(&gpu_resources.linear_to_srgb_pipeline);
+        render_pass.set_bind_group(0, &bind_group, &[]);
+        render_pass.draw(0..4, 0..1);
+    }
+
+    queue.submit(Some(encoder.finish()));
+
+    // GPU readback: Create staging buffer with proper alignment
+    let bytes_per_pixel = 4u32; // RGBA8
+    let bytes_per_row_alignment = 256u32;
+    let unpadded_bytes_per_row = width * bytes_per_pixel;
+    let bytes_per_row = ((unpadded_bytes_per_row + bytes_per_row_alignment - 1)
+        / bytes_per_row_alignment) * bytes_per_row_alignment;
+    let buffer_size = (bytes_per_row * height) as u64;
+
+    let staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
+        label: Some("export_staging_buffer"),
+        size: buffer_size,
+        usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
+        mapped_at_creation: false,
+    });
+
+    // Copy texture to staging buffer
+    let mut copy_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+        label: Some("export_copy_encoder"),
+    });
+
+    copy_encoder.copy_texture_to_buffer(
+        wgpu::TexelCopyTextureInfo {
+            texture: &output_texture,
+            mip_level: 0,
+            origin: wgpu::Origin3d::ZERO,
+            aspect: wgpu::TextureAspect::All,
+        },
+        wgpu::TexelCopyBufferInfo {
+            buffer: &staging_buffer,
+            layout: wgpu::TexelCopyBufferLayout {
+                offset: 0,
+                bytes_per_row: Some(bytes_per_row),
+                rows_per_image: Some(height),
+            },
+        },
+        wgpu::Extent3d {
+            width,
+            height,
+            depth_or_array_layers: 1,
+        },
+    );
+
+    queue.submit(Some(copy_encoder.finish()));
+
+    // Map buffer and read pixels (synchronous)
+    let buffer_slice = staging_buffer.slice(..);
+    let (sender, receiver) = std::sync::mpsc::channel();
+    buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
+        sender.send(result).ok();
+    });
+
+    device.poll(wgpu::Maintain::Wait);
+
+    receiver
+        .recv()
+        .map_err(|_| "Failed to receive buffer mapping result")?
+        .map_err(|e| format!("Failed to map buffer: {:?}", e))?;
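+
+    // Row-pitch example: wgpu requires bytes_per_row to be a multiple of 256,
+    // so for width = 1000 the unpadded row is 4000 bytes but the staging buffer
+    // stores 4096 bytes per row; the loop below strips those 96 padding bytes.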
+
+    // Copy data from mapped buffer to output, removing padding
+    let data = buffer_slice.get_mapped_range();
+    for y in 0..height as usize {
+        let src_offset = y * bytes_per_row as usize;
+        let dst_offset = y * unpadded_bytes_per_row as usize;
+        let row_bytes = unpadded_bytes_per_row as usize;
+        rgba_buffer[dst_offset..dst_offset + row_bytes]
+            .copy_from_slice(&data[src_offset..src_offset + row_bytes]);
+    }
+
+    drop(data);
+    staging_buffer.unmap();
+
+    Ok(())
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;

diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/stage.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/stage.rs
index df6f205..c69e81b 100644
--- a/lightningbeam-ui/lightningbeam-editor/src/panes/stage.rs
+++ b/lightningbeam-ui/lightningbeam-editor/src/panes/stage.rs
@@ -6,7 +6,7 @@ use eframe::egui;
 
 use lightningbeam_core::action::Action;
 use lightningbeam_core::clip::ClipInstance;
-use lightningbeam_core::gpu::{BufferPool, BufferFormat, BufferSpec, Compositor, EffectProcessor, HDR_FORMAT};
+use lightningbeam_core::gpu::{BufferPool, BufferFormat, BufferSpec, Compositor, EffectProcessor, HDR_FORMAT, SrgbToLinearConverter};
 use lightningbeam_core::layer::{AnyLayer, AudioLayer, AudioLayerType, VideoLayer, VectorLayer};
 use lightningbeam_core::renderer::RenderedLayerType;
 use super::{DragClipType, NodePath, PaneRenderer, SharedPaneState};
@@ -35,6 +35,8 @@ struct SharedVelloResources {
     compositor: Compositor,
     /// Effect processor for GPU shader effects
     effect_processor: Mutex<EffectProcessor>,
+    /// sRGB to linear color converter (for Vello output)
+    srgb_to_linear: SrgbToLinearConverter,
 }
 
 /// Per-instance Vello resources (created for each Stage pane)
@@ -202,7 +204,10 @@ impl SharedVelloResources {
         // Initialize effect processor for GPU shader effects
         let effect_processor = EffectProcessor::new(device, lightningbeam_core::gpu::HDR_FORMAT);
 
-        println!("✅ Vello shared resources initialized (renderer, shaders, HDR compositor, and effect processor)");
+        // Initialize sRGB to linear converter for Vello output
+        let srgb_to_linear = SrgbToLinearConverter::new(device);
+
+        println!("✅ Vello shared resources initialized (renderer, shaders, HDR compositor, effect processor, and color converter)");
 
         Ok(Self {
             renderer: Arc::new(Mutex::new(renderer)),
@@ -215,6 +220,7 @@
             buffer_pool: Mutex::new(buffer_pool),
             compositor,
             effect_processor: Mutex::new(effect_processor),
+            srgb_to_linear,
         })
     }
 }
@@ -460,11 +466,19 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
             antialiasing_method: vello::AaConfig::Msaa16,
         };
 
+        // HDR buffer spec for linear buffers
+        let hdr_spec = BufferSpec::new(width, height, BufferFormat::Rgba16Float);
+
         // First, render background and composite it
        // The background scene contains only a rectangle at document bounds,
         // so we use TRANSPARENT base_color to not fill the whole viewport
-        let bg_handle = buffer_pool.acquire(device, layer_spec);
-        if let (Some(bg_view), Some(hdr_view)) = (buffer_pool.get_view(bg_handle), &instance_resources.hdr_texture_view) {
+        let bg_srgb_handle = buffer_pool.acquire(device, layer_spec);
+        let bg_hdr_handle = buffer_pool.acquire(device, hdr_spec);
+        if let (Some(bg_srgb_view), Some(bg_hdr_view), Some(hdr_view)) = (
+            buffer_pool.get_view(bg_srgb_handle),
+            buffer_pool.get_view(bg_hdr_handle),
+            &instance_resources.hdr_texture_view,
+        ) {
             // Render background scene with transparent base (scene has the bg rect)
             let bg_render_params = vello::RenderParams {
                 base_color: vello::peniko::Color::TRANSPARENT,
@@ -474,15 +488,23 @@
             };
 
             if let Ok(mut renderer) = shared.renderer.lock() {
-                renderer.render_to_texture(device, queue, &composite_result.background, bg_view, &bg_render_params).ok();
+                renderer.render_to_texture(device, queue, &composite_result.background, bg_srgb_view, &bg_render_params).ok();
             }
 
+            // Convert sRGB to linear HDR
+            let mut convert_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+                label: Some("bg_srgb_to_linear_encoder"),
+            });
+            shared.srgb_to_linear.convert(device, &mut convert_encoder, bg_srgb_view, bg_hdr_view);
+            queue.submit(Some(convert_encoder.finish()));
+
             // Composite background onto HDR texture (first layer, clears to dark gray for stage area)
-            let bg_compositor_layer = lightningbeam_core::gpu::CompositorLayer::normal(bg_handle, 1.0);
+            let bg_compositor_layer = lightningbeam_core::gpu::CompositorLayer::normal(bg_hdr_handle, 1.0);
             let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
                 label: Some("bg_composite_encoder"),
             });
             // Clear to dark gray (stage background outside document bounds)
+            // Note: stage_bg values are already in linear space for HDR compositing
             let stage_bg = [45.0 / 255.0, 45.0 / 255.0, 48.0 / 255.0, 1.0];
             shared.compositor.composite(
                 device,
@@ -495,10 +517,8 @@
             );
             queue.submit(Some(encoder.finish()));
         }
-        buffer_pool.release(bg_handle);
-
-        // HDR buffer spec for effect processing
-        let hdr_spec = BufferSpec::new(width, height, BufferFormat::Rgba16Float);
+        buffer_pool.release(bg_srgb_handle);
+        buffer_pool.release(bg_hdr_handle);
 
         // Lock effect processor
         let mut effect_processor = shared.effect_processor.lock().unwrap();
@@ -511,18 +531,30 @@
 
             match &rendered_layer.layer_type {
                 RenderedLayerType::Content => {
-                    // Regular content layer - render and composite as before
-                    let layer_handle = buffer_pool.acquire(device, layer_spec);
+                    // Regular content layer - render to sRGB, convert to linear, then composite
+                    let srgb_handle = buffer_pool.acquire(device, layer_spec);
+                    let hdr_layer_handle = buffer_pool.acquire(device, hdr_spec);
 
-                    if let (Some(layer_view), Some(hdr_view)) = (buffer_pool.get_view(layer_handle), &instance_resources.hdr_texture_view) {
-                        // Render layer scene to buffer
+                    if let (Some(srgb_view), Some(hdr_layer_view), Some(hdr_view)) = (
+                        buffer_pool.get_view(srgb_handle),
+                        buffer_pool.get_view(hdr_layer_handle),
+                        &instance_resources.hdr_texture_view,
+                    ) {
+                        // Render layer scene to sRGB buffer
                         if let Ok(mut renderer) = shared.renderer.lock() {
-                            renderer.render_to_texture(device, queue, &rendered_layer.scene, layer_view, &layer_render_params).ok();
+                            renderer.render_to_texture(device, queue, &rendered_layer.scene, srgb_view, &layer_render_params).ok();
                         }
 
+                        // Convert sRGB to linear HDR
+                        let mut convert_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+                            label: Some("layer_srgb_to_linear_encoder"),
+                        });
+                        shared.srgb_to_linear.convert(device, &mut convert_encoder, srgb_view, hdr_layer_view);
+                        queue.submit(Some(convert_encoder.finish()));
+
                         // Composite this layer onto the HDR accumulator with its opacity
                         let compositor_layer = lightningbeam_core::gpu::CompositorLayer::new(
-                            layer_handle,
+                            hdr_layer_handle,
                             rendered_layer.opacity,
                             rendered_layer.blend_mode,
                         );
@@ -542,7 +574,8 @@
                         queue.submit(Some(encoder.finish()));
                     }
 
-                    buffer_pool.release(layer_handle);
+                    buffer_pool.release(srgb_handle);
+                    buffer_pool.release(hdr_layer_handle);
                 }
                 RenderedLayerType::Effect { effect_instances } => {
                     // Effect layer - apply effects to the current HDR accumulator